diff --git a/src/commands/account.ts b/src/commands/account.ts index 52a65ded5..41feb89b2 100644 --- a/src/commands/account.ts +++ b/src/commands/account.ts @@ -170,7 +170,7 @@ export class AccountCommand extends BaseCommand { const namespace = await resolveNamespaceFromDeployment(this.localConfig, this.configManager, task); const config = {namespace}; - if (!(await this.k8.namespaces().has(namespace))) { + if (!(await this.k8Factory.default().namespaces().has(namespace))) { throw new SoloError(`namespace ${namespace.name} does not exist`); } @@ -191,7 +191,10 @@ export class AccountCommand extends BaseCommand { title: 'Prepare for account key updates', task: async ctx => { const namespace = await resolveNamespaceFromDeployment(this.localConfig, this.configManager, task); - const secrets = await self.k8.secrets().list(namespace, ['solo.hedera.com/account-id']); + const secrets = await self.k8Factory + .default() + .secrets() + .list(namespace, ['solo.hedera.com/account-id']); ctx.updateSecrets = secrets.length > 0; ctx.accountsBatchedSet = self.accountManager.batchAccounts(this.systemAccounts); @@ -334,7 +337,7 @@ export class AccountCommand extends BaseCommand { config.amount = flags.amount.definition.defaultValue as number; } - if (!(await this.k8.namespaces().has(config.namespace))) { + if (!(await this.k8Factory.default().namespaces().has(config.namespace))) { throw new SoloError(`namespace ${config.namespace} does not exist`); } @@ -412,7 +415,7 @@ export class AccountCommand extends BaseCommand { ed25519PrivateKey: self.configManager.getFlag(flags.ed25519PrivateKey) as string, }; - if (!(await this.k8.namespaces().has(config.namespace))) { + if (!(await this.k8Factory.default().namespaces().has(config.namespace))) { throw new SoloError(`namespace ${config.namespace} does not exist`); } @@ -494,7 +497,7 @@ export class AccountCommand extends BaseCommand { privateKey: self.configManager.getFlag(flags.privateKey) as boolean, }; - if (!(await this.k8.namespaces().has(config.namespace))) { + if (!(await this.k8Factory.default().namespaces().has(config.namespace))) { throw new SoloError(`namespace ${config.namespace} does not exist`); } diff --git a/src/commands/base.ts b/src/commands/base.ts index 1a8e8051a..d1b7d678d 100644 --- a/src/commands/base.ts +++ b/src/commands/base.ts @@ -9,7 +9,7 @@ import {type LeaseManager} from '../core/lease/lease_manager.js'; import {type LocalConfig} from '../core/config/local_config.js'; import {type RemoteConfigManager} from '../core/config/remote/remote_config_manager.js'; import {type Helm} from '../core/helm.js'; -import {type K8} from '../core/kube/k8.js'; +import {type K8Factory} from '../core/kube/k8_factory.js'; import {type ChartManager} from '../core/chart_manager.js'; import {type ConfigManager} from '../core/config_manager.js'; import {type DependencyManager} from '../core/dependency_managers/index.js'; @@ -28,7 +28,7 @@ export interface CommandHandlers { export abstract class BaseCommand extends ShellRunner { protected readonly helm: Helm; - protected readonly k8: K8; + protected readonly k8Factory: K8Factory; protected readonly chartManager: ChartManager; protected readonly configManager: ConfigManager; protected readonly depManager: DependencyManager; @@ -39,7 +39,7 @@ export abstract class BaseCommand extends ShellRunner { constructor(opts: Opts) { if (!opts || !opts.helm) throw new Error('An instance of core/Helm is required'); - if (!opts || !opts.k8) throw new Error('An instance of core/K8 is required'); + if (!opts || !opts.k8Factory) 
throw new Error('An instance of core/K8Factory is required'); if (!opts || !opts.chartManager) throw new Error('An instance of core/ChartManager is required'); if (!opts || !opts.configManager) throw new Error('An instance of core/ConfigManager is required'); if (!opts || !opts.depManager) throw new Error('An instance of core/DependencyManager is required'); @@ -49,7 +49,7 @@ export abstract class BaseCommand extends ShellRunner { super(); this.helm = opts.helm; - this.k8 = opts.k8; + this.k8Factory = opts.k8Factory; this.chartManager = opts.chartManager; this.configManager = opts.configManager; this.depManager = opts.depManager; @@ -177,8 +177,8 @@ export abstract class BaseCommand extends ShellRunner { return this._configMaps.get(configName).getUnusedConfigs(); } - getK8() { - return this.k8; + getK8Factory() { + return this.k8Factory; } getLocalConfig() { diff --git a/src/commands/cluster/handlers.ts b/src/commands/cluster/handlers.ts index 1f7429166..0b55eba19 100644 --- a/src/commands/cluster/handlers.ts +++ b/src/commands/cluster/handlers.ts @@ -31,7 +31,7 @@ export class ClusterCommandHandlers implements CommandHandlers { [ this.tasks.initialize(argv, connectConfigBuilder.bind(this)), this.parent.setupHomeDirectoryTask(), - this.parent.getLocalConfig().promptLocalConfigTask(this.parent.getK8()), + this.parent.getLocalConfig().promptLocalConfigTask(this.parent.getK8Factory()), this.tasks.selectContext(), ListrRemoteConfig.loadRemoteConfig(this.parent, argv), this.tasks.readClustersFromRemoteConfig(argv), diff --git a/src/commands/cluster/index.ts b/src/commands/cluster/index.ts index aa003addf..f97dd7d75 100644 --- a/src/commands/cluster/index.ts +++ b/src/commands/cluster/index.ts @@ -19,7 +19,11 @@ export class ClusterCommand extends BaseCommand { constructor(opts: Opts) { super(opts); - this.handlers = new ClusterCommandHandlers(this, new ClusterCommandTasks(this, this.k8), this.remoteConfigManager); + this.handlers = new ClusterCommandHandlers( + this, + new ClusterCommandTasks(this, this.k8Factory), + this.remoteConfigManager, + ); } getCommandDefinition() { diff --git a/src/commands/cluster/tasks.ts b/src/commands/cluster/tasks.ts index 0cc12092d..f7968ab36 100644 --- a/src/commands/cluster/tasks.ts +++ b/src/commands/cluster/tasks.ts @@ -15,7 +15,7 @@ import {ErrorMessages} from '../../core/error_messages.js'; import {SoloError} from '../../core/errors.js'; import {RemoteConfigManager} from '../../core/config/remote/remote_config_manager.js'; import {type RemoteConfigDataWrapper} from '../../core/config/remote/remote_config_data_wrapper.js'; -import {type K8} from '../../core/kube/k8.js'; +import {type K8Factory} from '../../core/kube/k8_factory.js'; import {type SoloListrTask, type SoloListrTaskWrapper} from '../../types/index.js'; import {type SelectClusterContextContext} from './configs.js'; import {type DeploymentName} from '../../core/config/remote/types.js'; @@ -32,7 +32,7 @@ export class ClusterCommandTasks { constructor( parent, - private readonly k8: K8, + private readonly k8Factory: K8Factory, ) { this.parent = parent; } @@ -46,14 +46,14 @@ export class ClusterCommandTasks { if (!context) { const isQuiet = self.parent.getConfigManager().getFlag(flags.quiet); if (isQuiet) { - context = self.parent.getK8().contexts().readCurrent(); + context = self.parent.getK8Factory().default().contexts().readCurrent(); } else { context = await self.promptForContext(parentTask, cluster); } localConfig.clusterRefs[cluster] = context; } - if (!(await 
self.parent.getK8().contexts().testContextConnection(context))) { + if (!(await self.parent.getK8Factory().default().contexts().testContextConnection(context))) { subTask.title = `${subTask.title} - ${chalk.red('Cluster connection failed')}`; throw new SoloError(`${ErrorMessages.INVALID_CONTEXT_FOR_CLUSTER_DETAILED(context, cluster)}`); } @@ -72,7 +72,7 @@ export class ClusterCommandTasks { title: `Pull and validate remote configuration for cluster: ${chalk.cyan(cluster)}`, task: async (_, subTask: ListrTaskWrapper) => { const context = localConfig.clusterRefs[cluster]; - self.parent.getK8().contexts().updateCurrent(context); + self.parent.getK8Factory().default().contexts().updateCurrent(context); const remoteConfigFromOtherCluster = await self.parent.getRemoteConfigManager().get(); if (!RemoteConfigManager.compare(currentRemoteConfig, remoteConfigFromOtherCluster)) { throw new SoloError(ErrorMessages.REMOTE_CONFIGS_DO_NOT_MATCH(currentClusterName, cluster)); @@ -87,7 +87,7 @@ export class ClusterCommandTasks { title: 'Read clusters from remote config', task: async (ctx, task) => { const localConfig = this.parent.getLocalConfig(); - const currentClusterName = this.parent.getK8().clusters().readCurrent(); + const currentClusterName = this.parent.getK8Factory().default().clusters().readCurrent(); const currentRemoteConfig: RemoteConfigDataWrapper = await this.parent.getRemoteConfigManager().get(); const subTasks = []; const remoteConfigClusters = Object.keys(currentRemoteConfig.clusters); @@ -161,7 +161,7 @@ export class ClusterCommandTasks { } else if (!localConfig.clusterRefs[cluster]) { // In quiet mode, use the currently selected context to update the mapping if (isQuiet) { - localConfig.clusterRefs[cluster] = this.parent.getK8().contexts().readCurrent(); + localConfig.clusterRefs[cluster] = this.parent.getK8Factory().default().contexts().readCurrent(); } // Prompt the user to select a context if mapping value is missing @@ -184,7 +184,7 @@ export class ClusterCommandTasks { ) { let selectedContext; if (isQuiet) { - selectedContext = this.parent.getK8().contexts().readCurrent(); + selectedContext = this.parent.getK8Factory().default().contexts().readCurrent(); } else { selectedContext = await this.promptForContext(task, selectedCluster); localConfig.clusterRefs[selectedCluster] = selectedContext; @@ -193,7 +193,7 @@ export class ClusterCommandTasks { } private async promptForContext(task: SoloListrTaskWrapper, cluster: string) { - const kubeContexts = this.parent.getK8().contexts().list(); + const kubeContexts = this.parent.getK8Factory().default().contexts().list(); return flags.context.prompt(task, kubeContexts, cluster); } @@ -305,8 +305,8 @@ export class ClusterCommandTasks { else { // Add the deployment to the LocalConfig with the currently selected cluster and context in KubeConfig if (isQuiet) { - selectedContext = this.parent.getK8().contexts().readCurrent(); - selectedCluster = this.parent.getK8().clusters().readCurrent(); + selectedContext = this.parent.getK8Factory().default().contexts().readCurrent(); + selectedCluster = this.parent.getK8Factory().default().clusters().readCurrent(); localConfig.deployments[deploymentName] = { clusters: [selectedCluster], namespace: namespace ? 
namespace.name : '', @@ -334,11 +334,15 @@ export class ClusterCommandTasks { } } - const connectionValid = await this.parent.getK8().contexts().testContextConnection(selectedContext); + const connectionValid = await this.parent + .getK8Factory() + .default() + .contexts() + .testContextConnection(selectedContext); if (!connectionValid) { throw new SoloError(ErrorMessages.INVALID_CONTEXT_FOR_CLUSTER(selectedContext, selectedCluster)); } - this.parent.getK8().contexts().updateCurrent(selectedContext); + this.parent.getK8Factory().default().contexts().updateCurrent(selectedContext); this.parent.getConfigManager().setFlag(flags.context, selectedContext); }, }; @@ -360,14 +364,14 @@ export class ClusterCommandTasks { showClusterList() { return new Task('List all available clusters', async (ctx: any, task: ListrTaskWrapper) => { - this.parent.logger.showList('Clusters', this.parent.getK8().clusters().list()); + this.parent.logger.showList('Clusters', this.parent.getK8Factory().default().clusters().list()); }); } getClusterInfo() { return new Task('Get cluster info', async (ctx: any, task: ListrTaskWrapper) => { try { - const clusterName = this.parent.getK8().clusters().readCurrent(); + const clusterName = this.parent.getK8Factory().default().clusters().readCurrent(); this.parent.logger.showUser(`Cluster Name (${clusterName})`); this.parent.logger.showUser('\n'); } catch (e: Error | unknown) { diff --git a/src/commands/deployment.ts b/src/commands/deployment.ts index 4bf7a0579..dee65102d 100644 --- a/src/commands/deployment.ts +++ b/src/commands/deployment.ts @@ -27,7 +27,7 @@ export class DeploymentCommand extends BaseCommand { constructor(opts: Opts) { super(opts); - this.tasks = new ClusterCommandTasks(this, this.k8); + this.tasks = new ClusterCommandTasks(this, this.k8Factory); } private static get DEPLOY_FLAGS_LIST(): CommandFlag[] { @@ -88,7 +88,7 @@ export class DeploymentCommand extends BaseCommand { }, }, this.setupHomeDirectoryTask(), - this.localConfig.promptLocalConfigTask(self.k8), + this.localConfig.promptLocalConfigTask(self.k8Factory), { title: 'Add new deployment to local config', task: async (ctx, task) => { @@ -107,7 +107,7 @@ export class DeploymentCommand extends BaseCommand { title: 'Validate context', task: async (ctx, task) => { ctx.config.context = ctx.config.context ?? 
self.configManager.getFlag(flags.context); - const availableContexts = self.k8.contexts().list(); + const availableContexts = self.k8Factory.default().contexts().list(); if (availableContexts.includes(ctx.config.context)) { task.title += chalk.green(`- validated context ${ctx.config.context}`); @@ -132,7 +132,7 @@ export class DeploymentCommand extends BaseCommand { subTasks.push({ title: `Testing connection to cluster: ${chalk.cyan(cluster)}`, task: async (_, task) => { - if (!(await self.k8.contexts().testContextConnection(context))) { + if (!(await self.k8Factory.default().contexts().testContextConnection(context))) { task.title = `${task.title} - ${chalk.red('Cluster connection failed')}`; throw new SoloError(`Cluster connection failed for: ${cluster}`); @@ -199,9 +199,9 @@ export class DeploymentCommand extends BaseCommand { const context = self.localConfig.clusterRefs[clusterName]; - self.k8.contexts().updateCurrent(context); + self.k8Factory.default().contexts().updateCurrent(context); - const namespaces = await self.k8.namespaces().list(); + const namespaces = await self.k8Factory.default().namespaces().list(); const namespacesWithRemoteConfigs: NamespaceNameAsString[] = []; for (const namespace of namespaces) { diff --git a/src/commands/explorer.ts b/src/commands/explorer.ts index 9eed84e0c..101ff22df 100644 --- a/src/commands/explorer.ts +++ b/src/commands/explorer.ts @@ -190,7 +190,7 @@ export class ExplorerCommand extends BaseCommand { ctx.config.valuesArg += await self.prepareValuesArg(ctx.config); - if (!(await self.k8.namespaces().has(ctx.config.namespace))) { + if (!(await self.k8Factory.default().namespaces().has(ctx.config.namespace))) { throw new SoloError(`namespace ${ctx.config.namespace} does not exist`); } @@ -226,7 +226,8 @@ export class ExplorerCommand extends BaseCommand { } // wait cert-manager to be ready to proceed, otherwise may get error of "failed calling webhook" - await self.k8 + await self.k8Factory + .default() .pods() .waitForReadyStatus( constants.DEFAULT_CERT_MANAGER_NAMESPACE, @@ -251,14 +252,18 @@ export class ExplorerCommand extends BaseCommand { if (config.enableIngress) { // patch ingressClassName of mirror ingress so it can be recognized by haproxy ingress controller - await this.k8.ingresses().update(config.namespace, constants.MIRROR_NODE_RELEASE_NAME, { - spec: { - ingressClassName: `${config.namespace}-hedera-explorer-ingress-class`, - }, - }); + await this.k8Factory + .default() + .ingresses() + .update(config.namespace, constants.MIRROR_NODE_RELEASE_NAME, { + spec: { + ingressClassName: `${config.namespace}-hedera-explorer-ingress-class`, + }, + }); // to support GRPC over HTTP/2 - await this.k8 + await this.k8Factory + .default() .configMaps() .update(clusterSetupNamespace, constants.SOLO_CLUSTER_SETUP_CHART + '-haproxy-ingress', { 'backend-protocol': 'h2', @@ -286,19 +291,23 @@ export class ExplorerCommand extends BaseCommand { // patch explorer ingress to use h1 protocol, haproxy ingress controller default backend protocol is h2 // to support grpc over http/2 - await this.k8.ingresses().update(config.namespace, constants.HEDERA_EXPLORER_RELEASE_NAME, { - metadata: { - annotations: { - 'haproxy-ingress.github.io/backend-protocol': 'h1', + await this.k8Factory + .default() + .ingresses() + .update(config.namespace, constants.HEDERA_EXPLORER_RELEASE_NAME, { + metadata: { + annotations: { + 'haproxy-ingress.github.io/backend-protocol': 'h1', + }, }, - }, - }); + }); }, }, { title: 'Check explorer pod is ready', task: async ctx => { - await 
self.k8 + await self.k8Factory + .default() .pods() .waitForReadyStatus( ctx.config.namespace, @@ -311,7 +320,8 @@ export class ExplorerCommand extends BaseCommand { { title: 'Check haproxy ingress controller pod is ready', task: async () => { - await self.k8 + await self.k8Factory + .default() .pods() .waitForReadyStatus( constants.SOLO_SETUP_NAMESPACE, @@ -378,7 +388,7 @@ export class ExplorerCommand extends BaseCommand { self.configManager.update(argv); const namespace = await resolveNamespaceFromDeployment(this.localConfig, this.configManager, task); - if (!(await self.k8.namespaces().has(namespace))) { + if (!(await self.k8Factory.default().namespaces().has(namespace))) { throw new SoloError(`namespace ${namespace} does not exist`); } diff --git a/src/commands/mirror_node.ts b/src/commands/mirror_node.ts index ee0293faa..46989ceb7 100644 --- a/src/commands/mirror_node.ts +++ b/src/commands/mirror_node.ts @@ -244,7 +244,10 @@ export class MirrorNodeCommand extends BaseCommand { if (ctx.config.pinger) { const startAccId = constants.HEDERA_NODE_ACCOUNT_ID_START; - const networkPods = await this.k8.pods().list(namespace, ['solo.hedera.com/type=network-node']); + const networkPods = await this.k8Factory + .default() + .pods() + .list(namespace, ['solo.hedera.com/type=network-node']); if (networkPods.length) { const pod = networkPods[0]; @@ -261,7 +264,8 @@ export class MirrorNodeCommand extends BaseCommand { } else { try { const namespace = await resolveNamespaceFromDeployment(this.localConfig, this.configManager, task); - const secrets = await this.k8 + const secrets = await this.k8Factory + .default() .secrets() .list(namespace, [`solo.hedera.com/account-id=${operatorId}`]); if (secrets.length === 0) { @@ -319,7 +323,7 @@ export class MirrorNodeCommand extends BaseCommand { } } - if (!(await self.k8.namespaces().has(ctx.config.namespace))) { + if (!(await self.k8Factory.default().namespaces().has(ctx.config.namespace))) { throw new SoloError(`namespace ${ctx.config.namespace} does not exist`); } @@ -366,7 +370,8 @@ export class MirrorNodeCommand extends BaseCommand { { title: 'Check Postgres DB', task: async ctx => - await self.k8 + await self.k8Factory + .default() .pods() .waitForReadyStatus( ctx.config.namespace, @@ -379,7 +384,8 @@ export class MirrorNodeCommand extends BaseCommand { { title: 'Check REST API', task: async ctx => - await self.k8 + await self.k8Factory + .default() .pods() .waitForReadyStatus( ctx.config.namespace, @@ -391,7 +397,8 @@ export class MirrorNodeCommand extends BaseCommand { { title: 'Check GRPC', task: async ctx => - await self.k8 + await self.k8Factory + .default() .pods() .waitForReadyStatus( ctx.config.namespace, @@ -403,7 +410,8 @@ export class MirrorNodeCommand extends BaseCommand { { title: 'Check Monitor', task: async ctx => - await self.k8 + await self.k8Factory + .default() .pods() .waitForReadyStatus( ctx.config.namespace, @@ -415,7 +423,8 @@ export class MirrorNodeCommand extends BaseCommand { { title: 'Check Importer', task: async ctx => - await self.k8 + await self.k8Factory + .default() .pods() .waitForReadyStatus( ctx.config.namespace, @@ -485,7 +494,10 @@ export class MirrorNodeCommand extends BaseCommand { return; //! 
stop the execution } - const pods = await this.k8.pods().list(namespace, ['app.kubernetes.io/name=postgres']); + const pods = await this.k8Factory + .default() + .pods() + .list(namespace, ['app.kubernetes.io/name=postgres']); if (pods.length === 0) { throw new SoloError('postgres pod not found'); } @@ -493,7 +505,8 @@ export class MirrorNodeCommand extends BaseCommand { const postgresContainerName = ContainerName.of('postgresql'); const postgresPodRef = PodRef.of(namespace, postgresPodName); const containerRef = ContainerRef.of(postgresPodRef, postgresContainerName); - const mirrorEnvVars = await self.k8 + const mirrorEnvVars = await self.k8Factory + .default() .containers() .readByRef(containerRef) .execContainer('/bin/bash -c printenv'); @@ -511,7 +524,8 @@ export class MirrorNodeCommand extends BaseCommand { 'HEDERA_MIRROR_IMPORTER_DB_NAME', ); - await self.k8 + await self.k8Factory + .default() .containers() .readByRef(containerRef) .execContainer([ @@ -584,7 +598,7 @@ export class MirrorNodeCommand extends BaseCommand { self.configManager.update(argv); const namespace = await resolveNamespaceFromDeployment(this.localConfig, this.configManager, task); - if (!(await self.k8.namespaces().has(namespace))) { + if (!(await self.k8Factory.default().namespaces().has(namespace))) { throw new SoloError(`namespace ${namespace} does not exist`); } @@ -615,13 +629,17 @@ export class MirrorNodeCommand extends BaseCommand { task: async ctx => { // filtering postgres and redis PVCs using instance labels // since they have different name or component labels - const pvcs = await self.k8 + const pvcs = await self.k8Factory + .default() .pvcs() .list(ctx.config.namespace, [`app.kubernetes.io/instance=${constants.MIRROR_NODE_RELEASE_NAME}`]); if (pvcs) { for (const pvc of pvcs) { - await self.k8.pvcs().delete(PvcRef.of(ctx.config.namespace, PvcName.of(pvc))); + await self.k8Factory + .default() + .pvcs() + .delete(PvcRef.of(ctx.config.namespace, PvcName.of(pvc))); } } }, diff --git a/src/commands/network.ts b/src/commands/network.ts index 7dda6cf85..dd6cf53e4 100644 --- a/src/commands/network.ts +++ b/src/commands/network.ts @@ -84,7 +84,7 @@ export class NetworkCommand extends BaseCommand { constructor(opts: Opts) { super(opts); - if (!opts || !opts.k8) throw new Error('An instance of core/K8 is required'); + if (!opts || !opts.k8Factory) throw new Error('An instance of core/K8Factory is required'); if (!opts || !opts.keyManager) throw new IllegalArgumentError('An instance of core/KeyManager is required', opts.keyManager); if (!opts || !opts.platformInstaller) @@ -156,7 +156,8 @@ export class NetworkCommand extends BaseCommand { // Generating new minio credentials const envString = `MINIO_ROOT_USER=${minioAccessKey}\nMINIO_ROOT_PASSWORD=${minioSecretKey}`; minioData['config.env'] = Base64.encode(envString); - const isMinioSecretCreated = await this.k8 + const isMinioSecretCreated = await this.k8Factory + .default() .secrets() .createOrReplace(namespace, constants.MINIO_SECRET_NAME, SecretType.OPAQUE, minioData, undefined); if (!isMinioSecretCreated) { @@ -188,7 +189,8 @@ export class NetworkCommand extends BaseCommand { cloudData['S3_SECRET_KEY'] = Base64.encode(minioSecretKey); } - const isCloudSecretCreated = await this.k8 + const isCloudSecretCreated = await this.k8Factory + .default() .secrets() .createOrReplace(namespace, constants.UPLOADER_SECRET_NAME, SecretType.OPAQUE, cloudData, undefined); if (!isCloudSecretCreated) { @@ -201,7 +203,8 @@ export class NetworkCommand extends BaseCommand { const 
backupData = {}; const googleCredential = fs.readFileSync(config.googleCredential, 'utf8'); backupData['saJson'] = Base64.encode(googleCredential); - const isBackupSecretCreated = await this.k8 + const isBackupSecretCreated = await this.k8Factory + .default() .secrets() .createOrReplace(namespace, constants.BACKUP_SECRET_NAME, SecretType.OPAQUE, backupData, undefined); if (!isBackupSecretCreated) { @@ -419,8 +422,8 @@ export class NetworkCommand extends BaseCommand { config.valuesArg = await this.prepareValuesArg(config); config.namespace = namespace; - if (!(await this.k8.namespaces().has(namespace))) { - await this.k8.namespaces().create(namespace); + if (!(await this.k8Factory.default().namespaces().has(namespace))) { + await this.k8Factory.default().namespaces().create(namespace); } // prepare staging keys directory @@ -457,22 +460,25 @@ export class NetworkCommand extends BaseCommand { await self.chartManager.uninstall(ctx.config.namespace, constants.SOLO_DEPLOYMENT_CHART); if (ctx.config.deletePvcs) { - const pvcs = await self.k8.pvcs().list(ctx.config.namespace, []); + const pvcs = await self.k8Factory.default().pvcs().list(ctx.config.namespace, []); task.title = `Deleting PVCs in namespace ${ctx.config.namespace}`; if (pvcs) { for (const pvc of pvcs) { - await self.k8.pvcs().delete(PvcRef.of(ctx.config.namespace, PvcName.of(pvc))); + await self.k8Factory + .default() + .pvcs() + .delete(PvcRef.of(ctx.config.namespace, PvcName.of(pvc))); } } } if (ctx.config.deleteSecrets) { task.title = `Deleting secrets in namespace ${ctx.config.namespace}`; - const secrets = await self.k8.secrets().list(ctx.config.namespace); + const secrets = await self.k8Factory.default().secrets().list(ctx.config.namespace); if (secrets) { for (const secret of secrets) { - await self.k8.secrets().delete(ctx.config.namespace, secret.name); + await self.k8Factory.default().secrets().delete(ctx.config.namespace, secret.name); } } } @@ -590,7 +596,8 @@ export class NetworkCommand extends BaseCommand { subTasks.push({ title: `Check Node: ${chalk.yellow(nodeAlias)}`, task: async () => - await self.k8 + await self.k8Factory + .default() .pods() .waitForRunningPhase( config.namespace, @@ -621,7 +628,8 @@ export class NetworkCommand extends BaseCommand { subTasks.push({ title: `Check HAProxy for: ${chalk.yellow(nodeAlias)}`, task: async () => - await self.k8 + await self.k8Factory + .default() .pods() .waitForRunningPhase( config.namespace, @@ -637,7 +645,8 @@ export class NetworkCommand extends BaseCommand { subTasks.push({ title: `Check Envoy Proxy for: ${chalk.yellow(nodeAlias)}`, task: async () => - await self.k8 + await self.k8Factory + .default() .pods() .waitForRunningPhase( ctx.config.namespace, @@ -666,7 +675,8 @@ export class NetworkCommand extends BaseCommand { subTasks.push({ title: 'Check MinIO', task: async ctx => - await self.k8 + await self.k8Factory + .default() .pods() .waitForReadyStatus( ctx.config.namespace, @@ -768,7 +778,7 @@ export class NetworkCommand extends BaseCommand { networkDestroySuccess = false; if (ctx.config.deletePvcs && ctx.config.deleteSecrets && ctx.config.force) { - self.k8.namespaces().delete(ctx.config.namespace); + self.k8Factory.default().namespaces().delete(ctx.config.namespace); } else { // If the namespace is not being deleted, // remove all components data from the remote configuration @@ -837,7 +847,8 @@ export class NetworkCommand extends BaseCommand { title: 'Waiting for network pods to be running', task: async ctx => { const config = ctx.config; - await this.k8 + 
await this.k8Factory + .default() .pods() .waitForRunningPhase( config.namespace, diff --git a/src/commands/node/configs.ts b/src/commands/node/configs.ts index a3d5213c9..590808037 100644 --- a/src/commands/node/configs.ts +++ b/src/commands/node/configs.ts @@ -17,6 +17,7 @@ import {type NetworkNodeServices} from '../../core/network_node_services.js'; import {type NodeAddConfigClass} from './node_add_config.js'; import {type NamespaceName} from '../../core/kube/resources/namespace/namespace_name.js'; import {type PodRef} from '../../core/kube/resources/pod/pod_ref.js'; +import {type K8Factory} from '../../core/kube/k8_factory.js'; export const PREPARE_UPGRADE_CONFIGS_NAME = 'prepareUpgradeConfig'; export const DOWNLOAD_GENERATED_FILES_CONFIGS_NAME = 'downloadGeneratedFilesConfig'; @@ -29,13 +30,13 @@ export const KEYS_CONFIGS_NAME = 'keyConfigs'; export const SETUP_CONFIGS_NAME = 'setupConfigs'; export const START_CONFIGS_NAME = 'startConfigs'; -const initializeSetup = async (config, k8) => { +const initializeSetup = async (config: any, k8Factory: K8Factory) => { // compute other config parameters config.keysDir = path.join(validatePath(config.cacheDir), 'keys'); config.stagingDir = Templates.renderStagingDir(config.cacheDir, config.releaseTag); config.stagingKeysDir = path.join(validatePath(config.stagingDir), 'keys'); - if (!(await k8.namespaces().has(config.namespace))) { + if (!(await k8Factory.default().namespaces().has(config.namespace))) { throw new SoloError(`namespace ${config.namespace} does not exist`); } @@ -59,7 +60,7 @@ export const prepareUpgradeConfigBuilder = async function (argv, ctx, task) { config.namespace = await resolveNamespaceFromDeployment(this.parent.localConfig, this.configManager, task); - await initializeSetup(config, this.k8); + await initializeSetup(config, this.k8Factory); config.nodeClient = await this.accountManager.loadNodeClient(config.namespace); const accountKeys = await this.accountManager.getAccountKeysFromSecret(FREEZE_ADMIN_ACCOUNT, config.namespace); @@ -78,7 +79,7 @@ export const downloadGeneratedFilesConfigBuilder = async function (argv, ctx, ta config.namespace = await resolveNamespaceFromDeployment(this.parent.localConfig, this.configManager, task); config.existingNodeAliases = []; - await initializeSetup(config, this.k8); + await initializeSetup(config, this.k8Factory); return config; }; @@ -100,7 +101,7 @@ export const upgradeConfigBuilder = async function (argv, ctx, task, shouldLoadN config.existingNodeAliases = []; config.nodeAliases = helpers.parseNodeAliases(config.nodeAliasesUnparsed); - await initializeSetup(config, this.k8); + await initializeSetup(config, this.k8Factory); // set config in the context for later tasks to use ctx.config = config; @@ -138,7 +139,7 @@ export const updateConfigBuilder = async function (argv, ctx, task, shouldLoadNo config.curDate = new Date(); config.existingNodeAliases = []; - await initializeSetup(config, this.k8); + await initializeSetup(config, this.k8Factory); // set config in the context for later tasks to use ctx.config = config; @@ -183,7 +184,7 @@ export const deleteConfigBuilder = async function (argv, ctx, task, shouldLoadNo config.existingNodeAliases = []; config.namespace = await resolveNamespaceFromDeployment(this.parent.localConfig, this.configManager, task); - await initializeSetup(config, this.k8); + await initializeSetup(config, this.k8Factory); // set config in the context for later tasks to use ctx.config = config; @@ -234,7 +235,7 @@ export const addConfigBuilder = async function 
(argv, ctx, task, shouldLoadNodeC config.curDate = new Date(); config.existingNodeAliases = []; - await initializeSetup(config, this.k8); + await initializeSetup(config, this.k8Factory); // set config in the context for later tasks to use ctx.config = config; @@ -293,7 +294,7 @@ export const refreshConfigBuilder = async function (argv, ctx, task) { ctx.config.namespace = await resolveNamespaceFromDeployment(this.parent.localConfig, this.configManager, task); ctx.config.nodeAliases = helpers.parseNodeAliases(ctx.config.nodeAliasesUnparsed); - await initializeSetup(ctx.config, this.k8); + await initializeSetup(ctx.config, this.k8Factory); return ctx.config; }; @@ -323,7 +324,7 @@ export const stopConfigBuilder = async function (argv, ctx, task) { deployment: this.configManager.getFlag(flags.deployment), }; - if (!(await this.k8.namespaces().has(ctx.config.namespace))) { + if (!(await this.k8Factory.default().namespaces().has(ctx.config.namespace))) { throw new SoloError(`namespace ${ctx.config.namespace} does not exist`); } @@ -334,7 +335,7 @@ export const startConfigBuilder = async function (argv, ctx, task) { const config = this.getConfig(START_CONFIGS_NAME, argv.flags, ['nodeAliases', 'namespace']) as NodeStartConfigClass; config.namespace = await resolveNamespaceFromDeployment(this.parent.localConfig, this.configManager, task); - if (!(await this.k8.namespaces().has(config.namespace))) { + if (!(await this.k8Factory.default().namespaces().has(config.namespace))) { throw new SoloError(`namespace ${config.namespace} does not exist`); } @@ -353,7 +354,7 @@ export const setupConfigBuilder = async function (argv, ctx, task) { config.namespace = await resolveNamespaceFromDeployment(this.parent.localConfig, this.configManager, task); config.nodeAliases = helpers.parseNodeAliases(config.nodeAliasesUnparsed); - await initializeSetup(config, this.k8); + await initializeSetup(config, this.k8Factory); // set config in the context for later tasks to use ctx.config = config; diff --git a/src/commands/node/handlers.ts b/src/commands/node/handlers.ts index a755bff11..835911bda 100644 --- a/src/commands/node/handlers.ts +++ b/src/commands/node/handlers.ts @@ -23,7 +23,7 @@ import * as constants from '../../core/constants.js'; import {type AccountManager} from '../../core/account_manager.js'; import {type ConfigManager} from '../../core/config_manager.js'; import {type PlatformInstaller} from '../../core/platform_installer.js'; -import {type K8} from '../../core/kube/k8.js'; +import {type K8Factory} from '../../core/kube/k8_factory.js'; import {type LeaseManager} from '../../core/lease/lease_manager.js'; import {type RemoteConfigManager} from '../../core/config/remote/remote_config_manager.js'; import {IllegalArgumentError, SoloError} from '../../core/errors.js'; @@ -48,7 +48,7 @@ export class NodeCommandHandlers implements CommandHandlers { private readonly configManager: ConfigManager; private readonly platformInstaller: PlatformInstaller; private readonly logger: SoloLogger; - private readonly k8: K8; + private readonly k8Factory: K8Factory; private readonly tasks: NodeCommandTasks; private readonly leaseManager: LeaseManager; public readonly remoteConfigManager: RemoteConfigManager; @@ -64,7 +64,7 @@ export class NodeCommandHandlers implements CommandHandlers { if (!opts || !opts.configManager) throw new Error('An instance of core/ConfigManager is required'); if (!opts || !opts.logger) throw new Error('An instance of core/Logger is required'); if (!opts || !opts.tasks) throw new Error('An instance of 
NodeCommandTasks is required'); - if (!opts || !opts.k8) throw new Error('An instance of core/K8 is required'); + if (!opts || !opts.k8Factory) throw new Error('An instance of core/K8Factory is required'); if (!opts || !opts.platformInstaller) throw new IllegalArgumentError('An instance of core/PlatformInstaller is required', opts.platformInstaller); @@ -72,7 +72,7 @@ export class NodeCommandHandlers implements CommandHandlers { this.tasks = opts.tasks; this.accountManager = opts.accountManager; this.configManager = opts.configManager; - this.k8 = opts.k8; + this.k8Factory = opts.k8Factory; this.platformInstaller = opts.platformInstaller; this.leaseManager = opts.leaseManager; this.remoteConfigManager = opts.remoteConfigManager; diff --git a/src/commands/node/index.ts b/src/commands/node/index.ts index 92a839f56..3e45b6d2a 100644 --- a/src/commands/node/index.ts +++ b/src/commands/node/index.ts @@ -46,7 +46,7 @@ export class NodeCommand extends BaseCommand { logger: opts.logger, platformInstaller: opts.platformInstaller, profileManager: opts.profileManager, - k8: opts.k8, + k8Factory: opts.k8Factory, keyManager: opts.keyManager, chartManager: opts.chartManager, certificateManager: opts.certificateManager, @@ -58,7 +58,7 @@ export class NodeCommand extends BaseCommand { configManager: opts.configManager, platformInstaller: opts.platformInstaller, logger: opts.logger, - k8: opts.k8, + k8Factory: opts.k8Factory, tasks: this.tasks, parent: this, leaseManager: opts.leaseManager, @@ -69,13 +69,14 @@ export class NodeCommand extends BaseCommand { /** * stops and closes the port forwards * - calls the accountManager.close() - * - for all portForwards, calls k8.pods().readByRef(null).stopPortForward(srv) + * - for all portForwards, calls k8Factory.default().pods().readByRef(null).stopPortForward(srv) */ async close() { await this.accountManager.close(); if (this._portForwards) { for (const srv of this._portForwards) { - await this.k8.pods().readByRef(null).stopPortForward(srv); + // pass null to readByRef because it isn't needed for stopPortForward() + await this.k8Factory.default().pods().readByRef(null).stopPortForward(srv); } } diff --git a/src/commands/node/tasks.ts b/src/commands/node/tasks.ts index 637e0c5fa..2a7d5085e 100644 --- a/src/commands/node/tasks.ts +++ b/src/commands/node/tasks.ts @@ -6,7 +6,7 @@ import {type ConfigManager} from '../../core/config_manager.js'; import {type KeyManager} from '../../core/key_manager.js'; import {type ProfileManager} from '../../core/profile_manager.js'; import {type PlatformInstaller} from '../../core/platform_installer.js'; -import {type K8} from '../../core/kube/k8.js'; +import {type K8Factory} from '../../core/kube/k8_factory.js'; import {type ChartManager} from '../../core/chart_manager.js'; import {type CertificateManager} from '../../core/certificate_manager.js'; import {Zippy} from '../../core/zippy.js'; @@ -76,7 +76,7 @@ export class NodeCommandTasks { private readonly profileManager: ProfileManager; private readonly platformInstaller: PlatformInstaller; private readonly logger: SoloLogger; - private readonly k8: K8; + private readonly k8Factory: K8Factory; private readonly parent: BaseCommand; private readonly chartManager: ChartManager; private readonly certificateManager: CertificateManager; @@ -87,7 +87,7 @@ export class NodeCommandTasks { logger: SoloLogger; accountManager: AccountManager; configManager: ConfigManager; - k8: K8; + k8Factory: K8Factory; platformInstaller: PlatformInstaller; keyManager: KeyManager; profileManager: 
ProfileManager; @@ -99,7 +99,7 @@ export class NodeCommandTasks { throw new IllegalArgumentError('An instance of core/AccountManager is required', opts.accountManager as any); if (!opts || !opts.configManager) throw new Error('An instance of core/ConfigManager is required'); if (!opts || !opts.logger) throw new Error('An instance of core/Logger is required'); - if (!opts || !opts.k8) throw new Error('An instance of core/K8 is required'); + if (!opts || !opts.k8Factory) throw new Error('An instance of core/K8Factory is required'); if (!opts || !opts.platformInstaller) throw new IllegalArgumentError('An instance of core/PlatformInstaller is required', opts.platformInstaller); if (!opts || !opts.keyManager) @@ -112,7 +112,7 @@ export class NodeCommandTasks { this.accountManager = opts.accountManager; this.configManager = opts.configManager; this.logger = opts.logger; - this.k8 = opts.k8; + this.k8Factory = opts.k8Factory; this.platformInstaller = opts.platformInstaller; this.profileManager = opts.profileManager; @@ -234,7 +234,8 @@ export class NodeCommandTasks { const filterFunction = (path, stat) => { return !(path.includes('data/keys') || path.includes('data/config')); }; - await self.k8 + await self.k8Factory + .default() .containers() .readByRef(ContainerRef.of(podRef, constants.ROOT_CONTAINER)) .copyTo(localDataLibBuildPath, `${constants.HEDERA_HAPI_PATH}`, filterFunction); @@ -242,7 +243,8 @@ export class NodeCommandTasks { const testJsonFiles: string[] = this.configManager.getFlag(flags.appConfig)!.split(','); for (const jsonFile of testJsonFiles) { if (fs.existsSync(jsonFile)) { - await self.k8 + await self.k8Factory + .default() .containers() .readByRef(ContainerRef.of(podRef, constants.ROOT_CONTAINER)) .copyTo(jsonFile, `${constants.HEDERA_HAPI_PATH}`); @@ -357,7 +359,8 @@ export class NodeCommandTasks { }, timeout); try { - const response = await this.k8 + const response = await this.k8Factory + .default() .containers() .readByRef(ContainerRef.of(podRef, constants.ROOT_CONTAINER)) .execContainer([ @@ -424,7 +427,8 @@ export class NodeCommandTasks { subTasks.push({ title: `Check proxy for node: ${chalk.yellow(nodeAlias)}`, task: async ctx => - await this.k8 + await this.k8Factory + .default() .pods() .waitForReadyStatus( ctx.config.namespace, @@ -689,20 +693,22 @@ export class NodeCommandTasks { const containerRef = ContainerRef.of(podRef, constants.ROOT_CONTAINER); // copy the config.txt file from the node1 upgrade directory - await self.k8 + await self.k8Factory + .default() .containers() .readByRef(containerRef) .copyFrom(`${constants.HEDERA_HAPI_PATH}/data/upgrade/current/config.txt`, config.stagingDir); // if directory data/upgrade/current/data/keys does not exist, then use data/upgrade/current let keyDir = `${constants.HEDERA_HAPI_PATH}/data/upgrade/current/data/keys`; - if (!(await self.k8.containers().readByRef(containerRef).hasDir(keyDir))) { + if (!(await self.k8Factory.default().containers().readByRef(containerRef).hasDir(keyDir))) { keyDir = `${constants.HEDERA_HAPI_PATH}/data/upgrade/current`; } - const signedKeyFiles = (await self.k8.containers().readByRef(containerRef).listDir(keyDir)).filter(file => - file.name.startsWith(constants.SIGNING_KEY_PREFIX), - ); - await self.k8 + const signedKeyFiles = ( + await self.k8Factory.default().containers().readByRef(containerRef).listDir(keyDir) + ).filter(file => file.name.startsWith(constants.SIGNING_KEY_PREFIX)); + await self.k8Factory + .default() .containers() .readByRef(containerRef) .execContainer([ @@ -711,19 +717,22 @@ 
export class NodeCommandTasks { `mkdir -p ${constants.HEDERA_HAPI_PATH}/data/keys_backup && cp -r ${keyDir} ${constants.HEDERA_HAPI_PATH}/data/keys_backup/`, ]); for (const signedKeyFile of signedKeyFiles) { - await self.k8 + await self.k8Factory + .default() .containers() .readByRef(containerRef) .copyFrom(`${keyDir}/${signedKeyFile.name}`, `${config.keysDir}`); } if ( - await self.k8 + await self.k8Factory + .default() .containers() .readByRef(containerRef) .hasFile(`${constants.HEDERA_HAPI_PATH}/data/upgrade/current/application.properties`) ) { - await self.k8 + await self.k8Factory + .default() .containers() .readByRef(containerRef) .copyFrom( @@ -755,10 +764,10 @@ export class NodeCommandTasks { const containerRef = ContainerRef.of(podRef, constants.ROOT_CONTAINER); for (const upgradeDir of upgradeDirectories) { // check if directory upgradeDir exist in root container - if (!(await self.k8.containers().readByRef(containerRef).hasDir(upgradeDir))) { + if (!(await self.k8Factory.default().containers().readByRef(containerRef).hasDir(upgradeDir))) { continue; } - const files = await self.k8.containers().readByRef(containerRef).listDir(upgradeDir); + const files = await self.k8Factory.default().containers().readByRef(containerRef).listDir(upgradeDir); // iterate all files and copy them to the staging directory for (const file of files) { if (file.name.endsWith('.mf')) { @@ -768,7 +777,8 @@ export class NodeCommandTasks { continue; } this.logger.debug(`Copying file: ${file.name}`); - await self.k8 + await self.k8Factory + .default() .containers() .readByRef(containerRef) .copyFrom(`${upgradeDir}/${file.name}`, `${config.stagingDir}`); @@ -828,7 +838,8 @@ export class NodeCommandTasks { const podRef = PodRef.of(namespace, podName); try { - await this.k8 + await this.k8Factory + .default() .pods() .waitForRunningPhase( namespace, @@ -870,16 +881,22 @@ export class NodeCommandTasks { const podRef = ctx.config.podRefs[nodeAlias]; const containerRef = ContainerRef.of(podRef, constants.ROOT_CONTAINER); self.logger.debug(`Uploading state files to pod ${podRef.name}`); - await self.k8.containers().readByRef(containerRef).copyTo(zipFile, `${constants.HEDERA_HAPI_PATH}/data`); + await self.k8Factory + .default() + .containers() + .readByRef(containerRef) + .copyTo(zipFile, `${constants.HEDERA_HAPI_PATH}/data`); self.logger.info( `Deleting the previous state files in pod ${podRef.name} directory ${constants.HEDERA_HAPI_PATH}/data/saved`, ); - await self.k8 + await self.k8Factory + .default() .containers() .readByRef(containerRef) .execContainer(['rm', '-rf', `${constants.HEDERA_HAPI_PATH}/data/saved/*`]); - await self.k8 + await self.k8Factory + .default() .containers() .readByRef(containerRef) .execContainer([ @@ -1036,7 +1053,11 @@ export class NodeCommandTasks { subTasks.push({ title: `Start node: ${chalk.yellow(nodeAlias)}`, task: async () => { - await this.k8.containers().readByRef(containerRef).execContainer(['systemctl', 'restart', 'network-node']); + await this.k8Factory + .default() + .containers() + .readByRef(containerRef) + .execContainer(['systemctl', 'restart', 'network-node']); }, }); } @@ -1058,7 +1079,11 @@ export class NodeCommandTasks { async (ctx: any, task: ListrTaskWrapper) => { const podRef = PodRef.of(ctx.config.namespace, PodName.of(`network-${ctx.config.debugNodeAlias}-0`)); this.logger.debug(`Enable port forwarding for JVM debugger on pod ${podRef.name}`); - await this.k8.pods().readByRef(podRef).portForward(constants.JVM_DEBUG_PORT, constants.JVM_DEBUG_PORT); + await 
this.k8Factory + .default() + .pods() + .readByRef(podRef) + .portForward(constants.JVM_DEBUG_PORT, constants.JVM_DEBUG_PORT); }, (ctx: any) => !ctx.config.debugNodeAlias, ); @@ -1186,7 +1211,11 @@ export class NodeCommandTasks { subTasks.push({ title: `Stop node: ${chalk.yellow(nodeAlias)}`, task: async () => - await this.k8.containers().readByRef(containerRef).execContainer('systemctl stop network-node'), + await this.k8Factory + .default() + .containers() + .readByRef(containerRef) + .execContainer('systemctl stop network-node'), }); } } @@ -1220,7 +1249,8 @@ export class NodeCommandTasks { subTasks.push({ title: `Node: ${chalk.yellow(nodeAlias)}`, task: async () => - await this.k8 + await this.k8Factory + .default() .containers() .readByRef(containerRef) .execContainer(['bash', '-c', `rm -rf ${constants.HEDERA_HAPI_PATH}/data/saved/*`]), @@ -1586,7 +1616,7 @@ export class NodeCommandTasks { return new Task('Kill nodes', async (ctx: any, task: ListrTaskWrapper) => { const config = ctx.config; for (const service of config.serviceMap.values()) { - await this.k8.pods().readByRef(PodRef.of(config.namespace, service.nodePodName)).killPod(); + await this.k8Factory.default().pods().readByRef(PodRef.of(config.namespace, service.nodePodName)).killPod(); } }); } @@ -1600,7 +1630,7 @@ export class NodeCommandTasks { config.serviceMap = await this.accountManager.getNodeServiceMap(config.namespace); for (const service of config.serviceMap.values()) { - await this.k8.pods().readByRef(PodRef.of(config.namespace, service.nodePodName)).killPod(); + await this.k8Factory.default().pods().readByRef(PodRef.of(config.namespace, service.nodePodName)).killPod(); } // again, the pod names will change after the pods are killed @@ -1622,7 +1652,8 @@ export class NodeCommandTasks { subTasks.push({ title: `Check Node: ${chalk.yellow(nodeAlias)}`, task: async () => - await this.k8 + await this.k8Factory + .default() .pods() .waitForRunningPhase( config.namespace, @@ -1657,7 +1688,8 @@ export class NodeCommandTasks { const containerRef = ContainerRef.of(podRef, constants.ROOT_CONTAINER); const upgradeDirectory = `${constants.HEDERA_HAPI_PATH}/data/saved/com.hedera.services.ServicesMain/0/123`; // zip the contents of the newest folder on node1 within /opt/hgcapp/services-hedera/HapiApp2.0/data/saved/com.hedera.services.ServicesMain/0/123/ - const zipFileName = await this.k8 + const zipFileName = await this.k8Factory + .default() .containers() .readByRef(containerRef) .execContainer([ @@ -1665,7 +1697,8 @@ export class NodeCommandTasks { '-c', `cd ${upgradeDirectory} && mapfile -t states < <(ls -1t .) && jar cf "\${states[0]}.zip" -C "\${states[0]}" . 
&& echo -n \${states[0]}.zip`, ]); - await this.k8 + await this.k8Factory + .default() .containers() .readByRef(containerRef) .copyFrom(`${upgradeDirectory}/${zipFileName}`, config.stagingDir); @@ -1684,13 +1717,19 @@ export class NodeCommandTasks { const nodeId = Templates.nodeIdFromNodeAlias(config.nodeAlias); const savedStateDir = config.lastStateZipPath.match(/\/(\d+)\.zip$/)[1]; const savedStatePath = `${constants.HEDERA_HAPI_PATH}/data/saved/com.hedera.services.ServicesMain/${nodeId}/123/${savedStateDir}`; - await this.k8 + await this.k8Factory + .default() .containers() .readByRef(containerRef) .execContainer(['bash', '-c', `mkdir -p ${savedStatePath}`]); - await this.k8.containers().readByRef(containerRef).copyTo(config.lastStateZipPath, savedStatePath); + await this.k8Factory + .default() + .containers() + .readByRef(containerRef) + .copyTo(config.lastStateZipPath, savedStatePath); await this.platformInstaller.setPathPermission(podRef, constants.HEDERA_HAPI_PATH); - await this.k8 + await this.k8Factory + .default() .containers() .readByRef(containerRef) .execContainer([ diff --git a/src/commands/relay.ts b/src/commands/relay.ts index 8f1b721b0..a8dceecc9 100644 --- a/src/commands/relay.ts +++ b/src/commands/relay.ts @@ -105,7 +105,10 @@ export class RelayCommand extends BaseCommand { try { const deploymentName = this.configManager.getFlag(flags.deployment); const namespace = NamespaceName.of(this.localConfig.deployments[deploymentName].namespace); - const secrets = await this.k8.secrets().list(namespace, [`solo.hedera.com/account-id=${operatorIdUsing}`]); + const secrets = await this.k8Factory + .default() + .secrets() + .list(namespace, [`solo.hedera.com/account-id=${operatorIdUsing}`]); if (secrets.length === 0) { this.logger.info(`No k8s secret found for operator account id ${operatorIdUsing}, use default one`); valuesArg += ` --set config.OPERATOR_KEY_MAIN=${constants.OPERATOR_KEY}`; @@ -266,7 +269,8 @@ export class RelayCommand extends BaseCommand { config.valuesArg, ); - await self.k8 + await self.k8Factory + .default() .pods() .waitForRunningPhase( config.namespace, @@ -284,7 +288,8 @@ export class RelayCommand extends BaseCommand { task: async ctx => { const config = ctx.config; try { - await self.k8 + await self.k8Factory + .default() .pods() .waitForReadyStatus( config.namespace, diff --git a/src/core/account_manager.ts b/src/core/account_manager.ts index 5dc72feb2..241b22ac0 100644 --- a/src/core/account_manager.ts +++ b/src/core/account_manager.ts @@ -28,7 +28,7 @@ import {type NetworkNodeServices, NetworkNodeServicesBuilder} from './network_no import path from 'path'; import {type SoloLogger} from './logging.js'; -import {type K8} from './kube/k8.js'; +import {type K8Factory} from './kube/k8_factory.js'; import {type AccountIdWithKeyPairObject, type ExtendedNetServer} from '../types/index.js'; import {type NodeAlias, type SdkNetworkEndpoint} from '../types/aliases.js'; import {PodName} from './kube/resources/pod/pod_name.js'; @@ -56,10 +56,10 @@ export class AccountManager { constructor( @inject(InjectTokens.SoloLogger) private readonly logger?: SoloLogger, - @inject(InjectTokens.K8) private readonly k8?: K8, + @inject(InjectTokens.K8Factory) private readonly k8Factory?: K8Factory, ) { this.logger = patchInject(logger, InjectTokens.SoloLogger, this.constructor.name); - this.k8 = patchInject(k8, InjectTokens.K8, this.constructor.name); + this.k8Factory = patchInject(k8Factory, InjectTokens.K8Factory, this.constructor.name); this._portForwards = []; this._nodeClient = 
null; @@ -72,7 +72,8 @@ export class AccountManager { */ async getAccountKeysFromSecret(accountId: string, namespace: NamespaceName): Promise { try { - const secrets = await this.k8 + const secrets = await this.k8Factory + .default() .secrets() .list(namespace, [Templates.renderAccountKeySecretLabelSelector(accountId)]); @@ -147,7 +148,7 @@ export class AccountManager { this._nodeClient?.close(); if (this._portForwards) { for (const srv of this._portForwards) { - await this.k8.pods().readByRef(null).stopPortForward(srv); + await this.k8Factory.default().pods().readByRef(null).stopPortForward(srv); } } @@ -358,7 +359,8 @@ export class AccountManager { if (this._portForwards.length < totalNodes) { this._portForwards.push( - await this.k8 + await this.k8Factory + .default() .pods() .readByRef(PodRef.of(networkNodeService.namespace, networkNodeService.haProxyPodName)) .portForward(localPort, port), @@ -430,7 +432,7 @@ export class AccountManager { const serviceBuilderMap = new Map(); try { - const serviceList = await this.k8.services().list(namespace, [labelSelector]); + const serviceList = await this.k8Factory.default().services().list(namespace, [labelSelector]); let nodeId = '0'; // retrieve the list of services and build custom objects for the attributes we need @@ -508,12 +510,18 @@ export class AccountManager { // get the pod name for the service to use with portForward if needed for (const serviceBuilder of serviceBuilderMap.values()) { - const podList: V1Pod[] = await this.k8.pods().list(namespace, [`app=${serviceBuilder.haProxyAppSelector}`]); + const podList: V1Pod[] = await this.k8Factory + .default() + .pods() + .list(namespace, [`app=${serviceBuilder.haProxyAppSelector}`]); serviceBuilder.withHaProxyPodName(PodName.of(podList[0].metadata.name)); } // get the pod name of the network node - const pods: V1Pod[] = await this.k8.pods().list(namespace, ['solo.hedera.com/type=network-node']); + const pods: V1Pod[] = await this.k8Factory + .default() + .pods() + .list(namespace, ['solo.hedera.com/type=network-node']); for (const pod of pods) { // eslint-disable-next-line no-prototype-builtins if (!pod.metadata?.labels?.hasOwnProperty('solo.hedera.com/node-name')) { @@ -656,7 +664,8 @@ export class AccountManager { try { const createdOrUpdated = updateSecrets - ? await this.k8 + ? 
await this.k8Factory + .default() .secrets() .replace( namespace, @@ -665,7 +674,8 @@ export class AccountManager { data, Templates.renderAccountKeySecretLabelObject(accountId), ) - : await this.k8 + : await this.k8Factory + .default() .secrets() .create( namespace, @@ -837,16 +847,19 @@ export class AccountManager { } try { - const accountSecretCreated = await this.k8.secrets().createOrReplace( - namespace, - Templates.renderAccountKeySecretName(accountInfo.accountId), - SecretType.OPAQUE, - { - privateKey: Base64.encode(accountInfo.privateKey), - publicKey: Base64.encode(accountInfo.publicKey), - }, - Templates.renderAccountKeySecretLabelObject(accountInfo.accountId), - ); + const accountSecretCreated = await this.k8Factory + .default() + .secrets() + .createOrReplace( + namespace, + Templates.renderAccountKeySecretName(accountInfo.accountId), + SecretType.OPAQUE, + { + privateKey: Base64.encode(accountInfo.privateKey), + publicKey: Base64.encode(accountInfo.publicKey), + }, + Templates.renderAccountKeySecretLabelObject(accountInfo.accountId), + ); if (!accountSecretCreated) { this.logger.error( diff --git a/src/core/certificate_manager.ts b/src/core/certificate_manager.ts index 4589957fa..245eab6d1 100644 --- a/src/core/certificate_manager.ts +++ b/src/core/certificate_manager.ts @@ -8,7 +8,7 @@ import {Templates} from './templates.js'; import {GrpcProxyTlsEnums} from './enumerations.js'; import {type ConfigManager} from './config_manager.js'; -import {type K8} from './kube/k8.js'; +import {type K8Factory} from './kube/k8_factory.js'; import {type SoloLogger} from './logging.js'; import {type ListrTaskWrapper} from 'listr2'; import {type NodeAlias} from '../types/aliases.js'; @@ -24,11 +24,11 @@ import {InjectTokens} from './dependency_injection/inject_tokens.js'; @injectable() export class CertificateManager { constructor( - @inject(InjectTokens.K8) private readonly k8?: K8, + @inject(InjectTokens.K8Factory) private readonly k8Factory?: K8Factory, @inject(InjectTokens.SoloLogger) private readonly logger?: SoloLogger, @inject(InjectTokens.ConfigManager) private readonly configManager?: ConfigManager, ) { - this.k8 = patchInject(k8, InjectTokens.K8, this.constructor.name); + this.k8Factory = patchInject(k8Factory, InjectTokens.K8Factory, this.constructor.name); this.logger = patchInject(logger, InjectTokens.SoloLogger, this.constructor.name); this.configManager = patchInject(configManager, InjectTokens.ConfigManager, this.constructor.name); } @@ -80,7 +80,10 @@ export class CertificateManager { const namespace = this.getNamespace(); const labels = Templates.renderGrpcTlsCertificatesSecretLabelObject(nodeAlias, type); - const isSecretCreated = await this.k8.secrets().createOrReplace(namespace, name, SecretType.OPAQUE, data, labels); + const isSecretCreated = await this.k8Factory + .default() + .secrets() + .createOrReplace(namespace, name, SecretType.OPAQUE, data, labels); if (!isSecretCreated) { throw new SoloError(`failed to create secret for TLS certificates for node '${nodeAlias}'`); } diff --git a/src/core/cluster_checks.ts b/src/core/cluster_checks.ts index 0ddc95e7a..f0a672de5 100644 --- a/src/core/cluster_checks.ts +++ b/src/core/cluster_checks.ts @@ -6,7 +6,7 @@ import * as constants from './constants.js'; import {patchInject} from './dependency_injection/container_helper.js'; import {type SoloLogger} from './logging.js'; import {inject, injectable} from 'tsyringe-neo'; -import {type K8} from './kube/k8.js'; +import {type K8Factory} from './kube/k8_factory.js'; import {type Pod} 
from './kube/resources/pod/pod.js'; import {type IngressClass} from './kube/resources/ingress_class/ingress_class.js'; import {type V1Pod, type V1ConfigMap} from '@kubernetes/client-node'; @@ -19,10 +19,10 @@ import {InjectTokens} from './dependency_injection/inject_tokens.js'; export class ClusterChecks { constructor( @inject(InjectTokens.SoloLogger) private readonly logger?: SoloLogger, - @inject(InjectTokens.K8) private readonly k8?: K8, + @inject(InjectTokens.K8Factory) private readonly k8Factory?: K8Factory, ) { this.logger = patchInject(logger, InjectTokens.SoloLogger, this.constructor.name); - this.k8 = patchInject(k8, InjectTokens.K8, this.constructor.name); + this.k8Factory = patchInject(k8Factory, InjectTokens.K8Factory, this.constructor.name); } /** @@ -31,7 +31,7 @@ export class ClusterChecks { */ public async isCertManagerInstalled(): Promise { try { - const pods: Pod[] = await this.k8.pods().listForAllNamespaces(['app=cert-manager']); + const pods: Pod[] = await this.k8Factory.default().pods().listForAllNamespaces(['app=cert-manager']); return pods.length > 0; } catch (e) { @@ -48,7 +48,7 @@ export class ClusterChecks { public async isMinioInstalled(namespace: NamespaceName): Promise { try { // TODO DETECT THE OPERATOR - const pods: V1Pod[] = await this.k8.pods().list(namespace, ['app=minio']); + const pods: V1Pod[] = await this.k8Factory.default().pods().list(namespace, ['app=minio']); return pods.length > 0; } catch (e) { @@ -64,7 +64,7 @@ export class ClusterChecks { */ public async isIngressControllerInstalled(): Promise { try { - const ingressClassList: IngressClass[] = await this.k8.ingressClasses().list(); + const ingressClassList: IngressClass[] = await this.k8Factory.default().ingressClasses().list(); return ingressClassList.length > 0; } catch (e) { @@ -80,7 +80,8 @@ export class ClusterChecks { */ public async isRemoteConfigPresentInAnyNamespace() { try { - const configmaps: V1ConfigMap[] = await this.k8 + const configmaps: V1ConfigMap[] = await this.k8Factory + .default() .configMaps() .listForAllNamespaces([constants.SOLO_REMOTE_CONFIGMAP_LABEL_SELECTOR]); @@ -99,7 +100,10 @@ export class ClusterChecks { */ public async isPrometheusInstalled(namespace: NamespaceName) { try { - const pods: V1Pod[] = await this.k8.pods().list(namespace, ['app.kubernetes.io/name=prometheus']); + const pods: V1Pod[] = await this.k8Factory + .default() + .pods() + .list(namespace, ['app.kubernetes.io/name=prometheus']); return pods.length > 0; } catch (e) { @@ -117,7 +121,8 @@ export class ClusterChecks { */ public async isRemoteConfigPresentInNamespace(namespace: NamespaceName): Promise { try { - const configmaps: V1ConfigMap[] = await this.k8 + const configmaps: V1ConfigMap[] = await this.k8Factory + .default() .configMaps() .list(namespace, [constants.SOLO_REMOTE_CONFIGMAP_LABEL_SELECTOR]); diff --git a/src/core/config/local_config.ts b/src/core/config/local_config.ts index e74f7c665..3fa500dca 100644 --- a/src/core/config/local_config.ts +++ b/src/core/config/local_config.ts @@ -17,7 +17,7 @@ import {IsClusterRefs, IsDeployments} from '../validator_decorators.js'; import {type ConfigManager} from '../config_manager.js'; import {type DeploymentName, type EmailAddress} from './remote/types.js'; import {ErrorMessages} from '../error_messages.js'; -import {type K8} from '../../core/kube/k8.js'; +import {type K8Factory} from '../../core/kube/k8_factory.js'; import {splitFlagInput} from '../helpers.js'; import {inject, injectable} from 'tsyringe-neo'; import {patchInject} from 
'../dependency_injection/container_helper.js'; @@ -129,7 +129,7 @@ export class LocalConfig implements LocalConfigData { this.logger.info(`Wrote local config to ${this.filePath}: ${yamlContent}`); } - public promptLocalConfigTask(k8: K8): SoloListrTask { + public promptLocalConfigTask(k8Factory: K8Factory): SoloListrTask { const self = this; return { @@ -157,7 +157,7 @@ export class LocalConfig implements LocalConfigData { if (!deploymentClusters) { if (isQuiet) { - deploymentClusters = k8.clusters().readCurrent(); + deploymentClusters = k8Factory.default().clusters().readCurrent(); } else { deploymentClusters = await flags.deploymentClusters.prompt(task, deploymentClusters); } @@ -179,7 +179,7 @@ export class LocalConfig implements LocalConfigData { if (!isQuiet) { const promptedContexts: string[] = []; for (const clusterRef of parsedClusterRefs) { - const kubeContexts = k8.contexts().list(); + const kubeContexts = k8Factory.default().contexts().list(); const context: string = await flags.context.prompt(task, kubeContexts, clusterRef); self.clusterRefs[clusterRef] = context; promptedContexts.push(context); @@ -188,7 +188,7 @@ export class LocalConfig implements LocalConfigData { } self.configManager.setFlag(flags.context, promptedContexts.join(',')); } else { - const context = k8.contexts().readCurrent(); + const context = k8Factory.default().contexts().readCurrent(); for (const clusterRef of parsedClusterRefs) { self.clusterRefs[clusterRef] = context; } diff --git a/src/core/config/remote/remote_config_manager.ts b/src/core/config/remote/remote_config_manager.ts index 186a98cdc..a64e99299 100644 --- a/src/core/config/remote/remote_config_manager.ts +++ b/src/core/config/remote/remote_config_manager.ts @@ -10,7 +10,7 @@ import {Flags as flags} from '../../../commands/flags.js'; import * as yaml from 'yaml'; import {ComponentsDataWrapper} from './components_data_wrapper.js'; import {RemoteConfigValidator} from './remote_config_validator.js'; -import {type K8} from '../../kube/k8.js'; +import {type K8Factory} from '../../kube/k8_factory.js'; import {type ClusterRef, type Context, type DeploymentName, type NamespaceNameAsString} from './types.js'; import {type SoloLogger} from '../../logging.js'; import {type ConfigManager} from '../../config_manager.js'; @@ -38,18 +38,18 @@ export class RemoteConfigManager { private remoteConfig: Optional; /** - * @param k8 - The Kubernetes client used for interacting with ConfigMaps. + * @param k8Factory - The Kubernetes client used for interacting with ConfigMaps. * @param logger - The logger for recording activity and errors. * @param localConfig - Local configuration for the remote config. * @param configManager - Manager to retrieve application flags and settings. 
*/ public constructor( - @inject(InjectTokens.K8) private readonly k8?: K8, + @inject(InjectTokens.K8Factory) private readonly k8Factory?: K8Factory, @inject(InjectTokens.SoloLogger) private readonly logger?: SoloLogger, @inject(InjectTokens.LocalConfig) private readonly localConfig?: LocalConfig, @inject(InjectTokens.ConfigManager) private readonly configManager?: ConfigManager, ) { - this.k8 = patchInject(k8, InjectTokens.K8, this.constructor.name); + this.k8Factory = patchInject(k8Factory, InjectTokens.K8Factory, this.constructor.name); this.logger = patchInject(logger, InjectTokens.SoloLogger, this.constructor.name); this.localConfig = patchInject(localConfig, InjectTokens.LocalConfig, this.constructor.name); this.configManager = patchInject(configManager, InjectTokens.ConfigManager, this.constructor.name); @@ -58,7 +58,7 @@ export class RemoteConfigManager { /* ---------- Getters ---------- */ public get currentCluster(): ClusterRef { - return this.k8.clusters().readCurrent() as ClusterRef; + return this.k8Factory.default().clusters().readCurrent() as ClusterRef; } /** @returns the components data wrapper cloned */ @@ -151,10 +151,10 @@ export class RemoteConfigManager { await RemoteConfigValidator.validateComponents( this.configManager.getFlag(flags.namespace), this.remoteConfig.components, - this.k8, + this.k8Factory, ); } catch { - throw new SoloError(ErrorMessages.REMOTE_CONFIG_IS_INVALID(this.k8.clusters().readCurrent())); + throw new SoloError(ErrorMessages.REMOTE_CONFIG_IS_INVALID(this.k8Factory.default().clusters().readCurrent())); } return this.remoteConfig; } @@ -203,7 +203,7 @@ export class RemoteConfigManager { await RemoteConfigValidator.validateComponents( this.configManager.getFlag(flags.namespace), self.remoteConfig.components, - self.k8, + self.k8Factory, ); const additionalCommandData = `Executed by ${self.localConfig.userEmailAddress}: `; @@ -225,10 +225,10 @@ export class RemoteConfigManager { argv: AnyObject, ) { const self = this; - self.k8.contexts().updateCurrent(context); + self.k8Factory.default().contexts().updateCurrent(context); - if (!(await self.k8.namespaces().has(NamespaceName.of(namespace)))) { - await self.k8.namespaces().create(NamespaceName.of(namespace)); + if (!(await self.k8Factory.default().namespaces().has(NamespaceName.of(namespace)))) { + await self.k8Factory.default().namespaces().create(NamespaceName.of(namespace)); } const localConfigExists = this.localConfig.configFileExists(); @@ -270,7 +270,10 @@ export class RemoteConfigManager { */ public async getConfigMap(): Promise { try { - return await this.k8.configMaps().read(this.getNamespace(), constants.SOLO_REMOTE_CONFIGMAP_NAME); + return await this.k8Factory + .default() + .configMaps() + .read(this.getNamespace(), constants.SOLO_REMOTE_CONFIGMAP_NAME); } catch (error: any) { if (!(error instanceof ResourceNotFoundError)) { throw new SoloError('Failed to read remote config from cluster', error); @@ -284,7 +287,8 @@ export class RemoteConfigManager { * Creates a new ConfigMap entry in the Kubernetes cluster with the remote configuration data. */ private async createConfigMap(): Promise { - await this.k8 + await this.k8Factory + .default() .configMaps() .create(this.getNamespace(), constants.SOLO_REMOTE_CONFIGMAP_NAME, constants.SOLO_REMOTE_CONFIGMAP_LABELS, { 'remote-config-data': yaml.stringify(this.remoteConfig.toObject()), @@ -293,7 +297,8 @@ export class RemoteConfigManager { /** Replaces an existing ConfigMap in the Kubernetes cluster with the current remote configuration data. 
*/ private async replaceConfigMap(): Promise { - await this.k8 + await this.k8Factory + .default() .configMaps() .replace(this.getNamespace(), constants.SOLO_REMOTE_CONFIGMAP_NAME, constants.SOLO_REMOTE_CONFIGMAP_LABELS, { 'remote-config-data': yaml.stringify(this.remoteConfig.toObject() as any), @@ -320,7 +325,7 @@ export class RemoteConfigManager { private setDefaultContextIfNotSet(): void { if (this.configManager.hasFlag(flags.context)) return; - const context = this.k8.contexts().readCurrent(); + const context = this.k8Factory.default().contexts().readCurrent(); if (!context) { this.logger.error("Context is not passed and default one can't be acquired", this.localConfig); diff --git a/src/core/config/remote/remote_config_validator.ts b/src/core/config/remote/remote_config_validator.ts index 0f441c60c..968188a17 100644 --- a/src/core/config/remote/remote_config_validator.ts +++ b/src/core/config/remote/remote_config_validator.ts @@ -4,7 +4,7 @@ import * as constants from '../../constants.js'; import {SoloError} from '../../errors.js'; -import {type K8} from '../../kube/k8.js'; +import {type K8Factory} from '../../kube/k8_factory.js'; import {type ComponentsDataWrapper} from './components_data_wrapper.js'; import {type BaseComponent} from './components/base_component.js'; import {type NamespaceName} from '../../kube/resources/namespace/namespace_name.js'; @@ -20,28 +20,32 @@ export class RemoteConfigValidator { * * @param namespace - namespace to validate the components in. * @param components - components which to validate. - * @param k8 - to validate the elements. + * @param k8Factory - to validate the elements. * TODO: Make compatible with multi-cluster K8 implementation */ public static async validateComponents( namespace: NamespaceName, components: ComponentsDataWrapper, - k8: K8, + k8Factory: K8Factory, ): Promise { await Promise.all([ - ...RemoteConfigValidator.validateRelays(namespace, components, k8), - ...RemoteConfigValidator.validateHaProxies(namespace, components, k8), - ...RemoteConfigValidator.validateMirrorNodes(namespace, components, k8), - ...RemoteConfigValidator.validateEnvoyProxies(namespace, components, k8), - ...RemoteConfigValidator.validateConsensusNodes(namespace, components, k8), - ...RemoteConfigValidator.validateMirrorNodeExplorers(namespace, components, k8), + ...RemoteConfigValidator.validateRelays(namespace, components, k8Factory), + ...RemoteConfigValidator.validateHaProxies(namespace, components, k8Factory), + ...RemoteConfigValidator.validateMirrorNodes(namespace, components, k8Factory), + ...RemoteConfigValidator.validateEnvoyProxies(namespace, components, k8Factory), + ...RemoteConfigValidator.validateConsensusNodes(namespace, components, k8Factory), + ...RemoteConfigValidator.validateMirrorNodeExplorers(namespace, components, k8Factory), ]); } - private static validateRelays(namespace: NamespaceName, components: ComponentsDataWrapper, k8: K8): Promise[] { + private static validateRelays( + namespace: NamespaceName, + components: ComponentsDataWrapper, + k8Factory: K8Factory, + ): Promise[] { return Object.values(components.relays).map(async component => { try { - const pods: V1Pod[] = await k8.pods().list(namespace, [constants.SOLO_RELAY_LABEL]); + const pods: V1Pod[] = await k8Factory.default().pods().list(namespace, [constants.SOLO_RELAY_LABEL]); // to return the generic error message if (!pods.length) throw new Error('Pod not found'); @@ -54,11 +58,14 @@ export class RemoteConfigValidator { private static validateHaProxies( namespace: 
NamespaceName, components: ComponentsDataWrapper, - k8: K8, + k8Factory: K8Factory, ): Promise[] { return Object.values(components.haProxies).map(async component => { try { - const pods: V1Pod[] = await k8.pods().list(namespace, [`app=${component.name}`]); + const pods: V1Pod[] = await k8Factory + .default() + .pods() + .list(namespace, [`app=${component.name}`]); // to return the generic error message if (!pods.length) throw new Error('Pod not found'); @@ -71,11 +78,11 @@ export class RemoteConfigValidator { private static validateMirrorNodes( namespace: NamespaceName, components: ComponentsDataWrapper, - k8: K8, + k8Factory: K8Factory, ): Promise[] { return Object.values(components.mirrorNodes).map(async component => { try { - const pods: V1Pod[] = await k8.pods().list(namespace, constants.SOLO_HEDERA_MIRROR_IMPORTER); + const pods: V1Pod[] = await k8Factory.default().pods().list(namespace, constants.SOLO_HEDERA_MIRROR_IMPORTER); // to return the generic error message if (!pods.length) throw new Error('Pod not found'); @@ -88,11 +95,14 @@ export class RemoteConfigValidator { private static validateEnvoyProxies( namespace: NamespaceName, components: ComponentsDataWrapper, - k8: K8, + k8Factory: K8Factory, ): Promise[] { return Object.values(components.envoyProxies).map(async component => { try { - const pods: V1Pod[] = await k8.pods().list(namespace, [`app=${component.name}`]); + const pods: V1Pod[] = await k8Factory + .default() + .pods() + .list(namespace, [`app=${component.name}`]); // to return the generic error message if (!pods.length) throw new Error('Pod not found'); @@ -105,11 +115,14 @@ export class RemoteConfigValidator { private static validateConsensusNodes( namespace: NamespaceName, components: ComponentsDataWrapper, - k8: K8, + k8Factory: K8Factory, ): Promise[] { return Object.values(components.consensusNodes).map(async component => { try { - const pods: V1Pod[] = await k8.pods().list(namespace, [`app=network-${component.name}`]); + const pods: V1Pod[] = await k8Factory + .default() + .pods() + .list(namespace, [`app=network-${component.name}`]); // to return the generic error message if (!pods.length) throw new Error('Pod not found'); @@ -122,11 +135,11 @@ export class RemoteConfigValidator { private static validateMirrorNodeExplorers( namespace: NamespaceName, components: ComponentsDataWrapper, - k8: K8, + k8Factory: K8Factory, ): Promise[] { return Object.values(components.mirrorNodeExplorers).map(async component => { try { - const pods: V1Pod[] = await k8.pods().list(namespace, [constants.SOLO_HEDERA_EXPLORER_LABEL]); + const pods: V1Pod[] = await k8Factory.default().pods().list(namespace, [constants.SOLO_HEDERA_EXPLORER_LABEL]); // to return the generic error message if (!pods.length) throw new Error('Pod not found'); diff --git a/src/core/dependency_injection/container_init.ts b/src/core/dependency_injection/container_init.ts index f6ce77bc2..760af251a 100644 --- a/src/core/dependency_injection/container_init.ts +++ b/src/core/dependency_injection/container_init.ts @@ -26,6 +26,7 @@ import * as version from '../../../version.js'; import {NetworkNodes} from '../network_nodes.js'; import {ClusterChecks} from '../cluster_checks.js'; import {InjectTokens} from './inject_tokens.js'; +import {K8ClientFactory} from '../kube/k8_client/k8_client_factory.js'; /** * Container class to manage the dependency injection container @@ -79,7 +80,7 @@ export class Container { container.register(InjectTokens.ChartManager, {useClass: ChartManager}, {lifecycle: Lifecycle.Singleton}); 
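// Illustrative usage sketch for the K8Factory registration below (TypeScript comments only; the resolve call
// mirrors src/index.ts, while the 'kind-solo' context and 'solo' namespace names are hypothetical):
//
//   const k8Factory: K8Factory = container.resolve(InjectTokens.K8Factory);
//   const k8 = k8Factory.default();               // client bound to the kubeconfig's current context
//   const remote = k8Factory.getK8('kind-solo');  // per-context client, cached inside K8ClientFactory
//   const exists = await k8.namespaces().has(NamespaceName.of('solo'));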
container.register(InjectTokens.ConfigManager, {useClass: ConfigManager}, {lifecycle: Lifecycle.Singleton}); - container.register(InjectTokens.K8, {useClass: K8Client}, {lifecycle: Lifecycle.Singleton}); + container.register(InjectTokens.K8Factory, {useClass: K8ClientFactory}, {lifecycle: Lifecycle.Singleton}); container.register(InjectTokens.AccountManager, {useClass: AccountManager}, {lifecycle: Lifecycle.Singleton}); container.register(InjectTokens.PlatformInstaller, {useClass: PlatformInstaller}, {lifecycle: Lifecycle.Singleton}); container.register(InjectTokens.KeyManager, {useClass: KeyManager}, {lifecycle: Lifecycle.Singleton}); diff --git a/src/core/dependency_injection/inject_tokens.ts b/src/core/dependency_injection/inject_tokens.ts index 2d1540b60..9e77b97c3 100644 --- a/src/core/dependency_injection/inject_tokens.ts +++ b/src/core/dependency_injection/inject_tokens.ts @@ -14,7 +14,7 @@ export const InjectTokens = { CacheDir: Symbol.for('CacheDir'), LocalConfigFilePath: Symbol.for('LocalConfigFilePath'), LeaseRenewalService: Symbol.for('LeaseRenewalService'), - K8: Symbol.for('K8'), + K8Factory: Symbol.for('K8Factory'), SoloLogger: Symbol.for('SoloLogger'), PackageDownloader: Symbol.for('PackageDownloader'), Zippy: Symbol.for('Zippy'), diff --git a/src/core/kube/k8_client/k8_client.ts b/src/core/kube/k8_client/k8_client.ts index 2c106b25b..7f5c29cc9 100644 --- a/src/core/kube/k8_client/k8_client.ts +++ b/src/core/kube/k8_client/k8_client.ts @@ -3,10 +3,6 @@ */ import * as k8s from '@kubernetes/client-node'; import {SoloError} from '../../errors.js'; -import {type ConfigManager} from '../../config_manager.js'; -import {type SoloLogger} from '../../logging.js'; -import {inject, injectable} from 'tsyringe-neo'; -import {patchInject} from '../../dependency_injection/container_helper.js'; import {type K8} from '../k8.js'; import {type Namespaces} from '../resources/namespace/namespaces.js'; import {K8ClientClusters} from '../k8_client/resources/cluster/k8_client_clusters.js'; @@ -32,7 +28,6 @@ import {type Secrets} from '../resources/secret/secrets.js'; import {K8ClientSecrets} from '../k8_client/resources/secret/k8_client_secrets.js'; import {type Ingresses} from '../resources/ingress/ingresses.js'; import {K8ClientIngresses} from './resources/ingress/k8_client_ingresses.js'; -import {InjectTokens} from '../../dependency_injection/inject_tokens.js'; /** * A kubernetes API wrapper class providing custom functionalities required by solo @@ -40,11 +35,10 @@ import {InjectTokens} from '../../dependency_injection/inject_tokens.js'; * Note: Take care if the same instance is used for parallel execution, as the behaviour may be unpredictable. 
* For parallel execution, create separate instances by invoking clone() */ -@injectable() export class K8Client implements K8 { private kubeConfig!: k8s.KubeConfig; - kubeClient!: k8s.CoreV1Api; - private coordinationApiClient: k8s.CoordinationV1Api; + private kubeClient!: k8s.CoreV1Api; + private coordinationApiClient!: k8s.CoordinationV1Api; private networkingApi!: k8s.NetworkingV1Api; private k8Leases: Leases; @@ -60,20 +54,17 @@ export class K8Client implements K8 { private k8Secrets: Secrets; private k8Ingresses: Ingresses; - constructor( - @inject(InjectTokens.ConfigManager) private readonly configManager?: ConfigManager, - @inject(InjectTokens.SoloLogger) private readonly logger?: SoloLogger, - ) { - this.configManager = patchInject(configManager, InjectTokens.ConfigManager, this.constructor.name); - this.logger = patchInject(logger, InjectTokens.SoloLogger, this.constructor.name); - - this.init(); + /** + * Create a new K8 client for the given context; if context is undefined, the current context from the kubeconfig is used + * @param context - The context to create the K8 client for + */ + constructor(private readonly context: string) { + this.init(this.context); } // TODO make private, but first we need to require a cluster to be set and address the test cases using this - init(): K8 { - this.kubeConfig = new k8s.KubeConfig(); - this.kubeConfig.loadFromDefault(); + init(context: string = undefined): K8 { + this.kubeConfig = this.getKubeConfig(context); if (!this.kubeConfig.getCurrentContext()) { throw new SoloError('No active kubernetes context found. ' + 'Please set current kubernetes context.'); @@ -100,7 +91,24 @@ export class K8Client implements K8 { this.k8Secrets = new K8ClientSecrets(this.kubeClient); this.k8Ingresses = new K8ClientIngresses(this.networkingApi); - return this; // to enable chaining + return this; + } + + private getKubeConfig(context: string): k8s.KubeConfig { + const kubeConfig = new k8s.KubeConfig(); + kubeConfig.loadFromDefault(); + + if (context) { + const kubeConfigContext: k8s.Context = kubeConfig.getContextObject(context); + + if (!kubeConfigContext) { + throw new SoloError(`No kube config context found with name ${context}`); + } + + kubeConfig.setCurrentContext(context); + } + + return kubeConfig; } public namespaces(): Namespaces { diff --git a/src/core/kube/k8_client/k8_client_factory.ts b/src/core/kube/k8_client/k8_client_factory.ts new file mode 100644 index 000000000..fe8f58615 --- /dev/null +++ b/src/core/kube/k8_client/k8_client_factory.ts @@ -0,0 +1,34 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + */ +import {type K8Factory} from '../k8_factory.js'; +import {type K8} from '../k8.js'; +import {K8Client} from './k8_client.js'; +import {injectable} from 'tsyringe-neo'; + +@injectable() +export class K8ClientFactory implements K8Factory { + private readonly k8Clients: Map<string, K8> = new Map(); + + public getK8(context: string): K8 { + if (!this.k8Clients.has(context)) { + this.k8Clients.set(context, this.createK8Client(context)); + } + + return this.k8Clients.get(context)!; + } + + /** + * Create a new K8 client for the given context + * @param context - The context to create the K8 client for + * @returns a new K8 client + * @private + */ + private createK8Client(context: string): K8 { + return new K8Client(context); + } + + public default(): K8 { + return new K8Client(undefined); + } +} diff --git a/src/core/kube/k8_factory.ts b/src/core/kube/k8_factory.ts new file mode 100644 index 000000000..0967cdfc2 ---
/dev/null +++ b/src/core/kube/k8_factory.ts @@ -0,0 +1,17 @@ +/** + * SPDX-License-Identifier: Apache-2.0 + */ +import {type K8} from './k8.js'; + +export interface K8Factory { + /** + * Get a K8 instance for the given context + * @param context - The context to get the K8 instance for + */ + getK8(context: string): K8; + + /** + * Get the default K8 instance which uses the kubeconfig current context + */ + default(): K8; +} diff --git a/src/core/kube/resources/cluster/clusters.ts b/src/core/kube/resources/cluster/clusters.ts index 8a080e894..7ba10453e 100644 --- a/src/core/kube/resources/cluster/clusters.ts +++ b/src/core/kube/resources/cluster/clusters.ts @@ -6,11 +6,11 @@ export interface Clusters { * Returns a list of clusters that are in the kubeconfig file * @returns a list of cluster names */ - list(): string[]; + list(): string[]; // TODO should this be removed and `solo cluster list` use local config cluster list? /** * Returns the current cluster name as defined in the kubeconfig file * @returns the current cluster name */ - readCurrent(): string; + readCurrent(): string; // TODO remove read current cluster, this should not be needed } diff --git a/src/core/kube/resources/lease/leases.ts b/src/core/kube/resources/lease/leases.ts index 16853ccd3..e17fff27a 100644 --- a/src/core/kube/resources/lease/leases.ts +++ b/src/core/kube/resources/lease/leases.ts @@ -13,7 +13,7 @@ export interface Leases { * @param durationSeconds - the duration of the lease in seconds\ * @returns the created lease */ - create(namespace: NamespaceName, leaseName: string, holderName: string, durationSeconds: number): Promise; // TODO was createNamespacedLease + create(namespace: NamespaceName, leaseName: string, holderName: string, durationSeconds: number): Promise; /** * Delete a lease @@ -21,7 +21,7 @@ export interface Leases { * @param name - the name of the lease * @returns the status of the deletion */ - delete(namespace: NamespaceName, name: string): Promise; // TODO was deleteNamespacedLease + delete(namespace: NamespaceName, name: string): Promise; /** * Returns the lease with the specified name @@ -30,7 +30,7 @@ export interface Leases { * @param timesCalled - the number of times this function has been called * @returns a list of lease names */ - read(namespace: NamespaceName, leaseName: string, timesCalled?: number): Promise; // TODO was readNamespacedLease + read(namespace: NamespaceName, leaseName: string, timesCalled?: number): Promise; /** * Renew a lease @@ -39,7 +39,7 @@ export interface Leases { * @param lease - the lease object * @returns the renewed lease */ - renew(namespace: NamespaceName, leaseName: string, lease: V1Lease): Promise; // TODO was renewNamespaceLease + renew(namespace: NamespaceName, leaseName: string, lease: V1Lease): Promise; /** * Transfer a lease @@ -47,5 +47,5 @@ export interface Leases { * @param newHolderName - the name of the new lease holder * @returns the transferred lease */ - transfer(lease: V1Lease, newHolderName: string): Promise; // TODO was transferNamespaceLease + transfer(lease: V1Lease, newHolderName: string): Promise; } diff --git a/src/core/kube/resources/secret/secrets.ts b/src/core/kube/resources/secret/secrets.ts index 78e531ed8..ce226beff 100644 --- a/src/core/kube/resources/secret/secrets.ts +++ b/src/core/kube/resources/secret/secrets.ts @@ -21,7 +21,7 @@ export interface Secrets { secretType: SecretType, data: Record, labels: Optional>, - ): Promise; // TODO was createSecret + ): Promise; createOrReplace( namespace: NamespaceName, @@ -48,7 +48,7 
@@ export interface Secrets { namespace: string; type: string; labels: Record; - }>; // TODO was getSecret + }>; /** * Delete a secret from the namespace @@ -56,7 +56,7 @@ export interface Secrets { * @param name - the name of the existing secret * @returns whether the secret was deleted successfully */ - delete(namespace: NamespaceName, name: string): Promise; // TODO was deleteSecret + delete(namespace: NamespaceName, name: string): Promise; /** * Get secrets by labels @@ -75,9 +75,7 @@ export interface Secrets { type: string; labels: Record; }> - >; // TODO was getSecretsByLabel(labels: string[]): Promise - // TODO consolidate getSecret into listByLabel - // TODO consolidate listSecretsByNamespace into listByLabel + >; - exists(namespace: NamespaceName, name: string): Promise; // TODO was secretExists + exists(namespace: NamespaceName, name: string): Promise; } diff --git a/src/core/kube/resources/service/services.ts b/src/core/kube/resources/service/services.ts index 60ea281fb..cfa27bd5f 100644 --- a/src/core/kube/resources/service/services.ts +++ b/src/core/kube/resources/service/services.ts @@ -11,14 +11,14 @@ export interface Services { * @param namespace - namespace * @param name - service name */ - read(namespace: NamespaceName, name: string): Promise; // TODO was getSvcByName + read(namespace: NamespaceName, name: string): Promise; /** * List all services in a namespace * @param namespace - namespace * @param labels - labels */ - list(namespace: NamespaceName, labels?: string[]): Promise; // TODO was listSvcs + list(namespace: NamespaceName, labels?: string[]): Promise; /** * Create a service diff --git a/src/core/lease/interval_lease.ts b/src/core/lease/interval_lease.ts index 2878742c3..2b0ee1267 100644 --- a/src/core/lease/interval_lease.ts +++ b/src/core/lease/interval_lease.ts @@ -3,7 +3,7 @@ */ import {MissingArgumentError, SoloError} from '../errors.js'; import {type V1Lease} from '@kubernetes/client-node'; -import {type K8} from '../../core/kube/k8.js'; +import {type K8Factory} from '../../core/kube/k8_factory.js'; import {LeaseHolder} from './lease_holder.js'; import {LeaseAcquisitionError, LeaseRelinquishmentError} from './lease_errors.js'; import {sleep} from '../helpers.js'; @@ -40,7 +40,7 @@ export class IntervalLease implements Lease { private _scheduleId: number | null = null; /** - * @param client - Injected kubernetes client need by the methods to create, renew, and delete leases. + * @param k8Factory - Injected kubernetes K8Factory need by the methods to create, renew, and delete leases. * @param renewalService - Injected lease renewal service need to support automatic (background) lease renewals. * @param leaseHolder - The holder of the lease. * @param namespace - The namespace in which the lease is to be acquired. @@ -48,14 +48,14 @@ export class IntervalLease implements Lease { * @param durationSeconds - The duration in seconds for which the lease is to be held; if not provided, the default value is used. 
*/ public constructor( - readonly client: K8, + readonly k8Factory: K8Factory, readonly renewalService: LeaseRenewalService, leaseHolder: LeaseHolder, namespace: NamespaceName, leaseName: string | null = null, durationSeconds: number | null = null, ) { - if (!client) throw new MissingArgumentError('client is required'); + if (!k8Factory) throw new MissingArgumentError('k8Factory is required'); if (!renewalService) throw new MissingArgumentError('renewalService is required'); if (!leaseHolder) throw new MissingArgumentError('_leaseHolder is required'); if (!namespace) throw new MissingArgumentError('_namespace is required'); @@ -282,7 +282,7 @@ export class IntervalLease implements Lease { */ private async retrieveLease(): Promise { try { - return await this.client.leases().read(this.namespace, this.leaseName); + return await this.k8Factory.default().leases().read(this.namespace, this.leaseName); } catch (e: any) { if (!(e instanceof SoloError)) { throw new LeaseAcquisitionError( @@ -311,11 +311,12 @@ export class IntervalLease implements Lease { private async createOrRenewLease(lease: V1Lease): Promise { try { if (!lease) { - await this.client + await this.k8Factory + .default() .leases() .create(this.namespace, this.leaseName, this.leaseHolder.toJson(), this.durationSeconds); } else { - await this.client.leases().renew(this.namespace, this.leaseName, lease); + await this.k8Factory.default().leases().renew(this.namespace, this.leaseName, lease); } if (!this.scheduleId) { @@ -336,7 +337,7 @@ export class IntervalLease implements Lease { */ private async transferLease(lease: V1Lease): Promise { try { - await this.client.leases().transfer(lease, this.leaseHolder.toJson()); + await this.k8Factory.default().leases().transfer(lease, this.leaseHolder.toJson()); if (!this.scheduleId) { this.scheduleId = await this.renewalService.schedule(this); @@ -354,7 +355,7 @@ export class IntervalLease implements Lease { */ private async deleteLease(): Promise { try { - await this.client.leases().delete(this.namespace, this.leaseName); + await this.k8Factory.default().leases().delete(this.namespace, this.leaseName); } catch (e: any) { throw new LeaseRelinquishmentError( `failed to delete the lease named '${this.leaseName}' in the ` + `'${this.namespace}' namespace`, diff --git a/src/core/lease/lease.ts b/src/core/lease/lease.ts index 18cf6f6f9..5091b4f34 100644 --- a/src/core/lease/lease.ts +++ b/src/core/lease/lease.ts @@ -1,13 +1,13 @@ /** * SPDX-License-Identifier: Apache-2.0 */ -import {type K8} from '../../core/kube/k8.js'; +import {type K8Factory} from '../../core/kube/k8_factory.js'; import {type LeaseHolder} from './lease_holder.js'; import {type Duration} from '../time/duration.js'; import {type NamespaceName} from '../kube/resources/namespace/namespace_name.js'; export interface Lease { - readonly client: K8; + readonly k8Factory: K8Factory; readonly renewalService: LeaseRenewalService; readonly leaseName: string; readonly leaseHolder: LeaseHolder; diff --git a/src/core/lease/lease_manager.ts b/src/core/lease/lease_manager.ts index 6063a4a7a..8318dd35c 100644 --- a/src/core/lease/lease_manager.ts +++ b/src/core/lease/lease_manager.ts @@ -3,7 +3,7 @@ */ import {Flags as flags} from '../../commands/flags.js'; import {type ConfigManager} from '../config_manager.js'; -import {type K8} from '../../core/kube/k8.js'; +import {type K8Factory} from '../../core/kube/k8_factory.js'; import {type SoloLogger} from '../logging.js'; import {type Lease, type LeaseRenewalService} from './lease.js'; import 
{IntervalLease} from './interval_lease.js'; @@ -24,18 +24,18 @@ export class LeaseManager { * * @param _renewalService - the lease renewal service. * @param _logger - the logger. - * @param k8 - the Kubernetes client. + * @param k8Factory - the Kubernetes client. * @param configManager - the configuration manager. */ constructor( @inject(InjectTokens.LeaseRenewalService) private readonly _renewalService?: LeaseRenewalService, @inject(InjectTokens.SoloLogger) private readonly _logger?: SoloLogger, - @inject(InjectTokens.K8) private readonly k8?: K8, + @inject(InjectTokens.K8Factory) private readonly k8Factory?: K8Factory, @inject(InjectTokens.ConfigManager) private readonly configManager?: ConfigManager, ) { this._renewalService = patchInject(_renewalService, InjectTokens.LeaseRenewalService, this.constructor.name); this._logger = patchInject(_logger, InjectTokens.SoloLogger, this.constructor.name); - this.k8 = patchInject(k8, InjectTokens.K8, this.constructor.name); + this.k8Factory = patchInject(k8Factory, InjectTokens.K8Factory, this.constructor.name); this.configManager = patchInject(configManager, InjectTokens.ConfigManager, this.constructor.name); } @@ -45,7 +45,12 @@ export class LeaseManager { * @returns a new lease instance. */ public async create(): Promise { - return new IntervalLease(this.k8, this._renewalService, LeaseHolder.default(), await this.currentNamespace()); + return new IntervalLease( + this.k8Factory, + this._renewalService, + LeaseHolder.default(), + await this.currentNamespace(), + ); } /** @@ -81,10 +86,10 @@ export class LeaseManager { } const namespace = deploymentNamespace ? deploymentNamespace : clusterSetupNamespace; - if (!(await this.k8.namespaces().has(namespace))) { - await this.k8.namespaces().create(namespace); + if (!(await this.k8Factory.default().namespaces().has(namespace))) { + await this.k8Factory.default().namespaces().create(namespace); - if (!(await this.k8.namespaces().has(namespace))) { + if (!(await this.k8Factory.default().namespaces().has(namespace))) { throw new LeaseAcquisitionError(`failed to create the '${namespace}' namespace`); } } diff --git a/src/core/network_nodes.ts b/src/core/network_nodes.ts index ca3174ac8..ad1413d6b 100644 --- a/src/core/network_nodes.ts +++ b/src/core/network_nodes.ts @@ -13,7 +13,7 @@ import {sleep} from './helpers.js'; import {Duration} from './time/duration.js'; import {inject, injectable} from 'tsyringe-neo'; import {type SoloLogger} from './logging.js'; -import {type K8} from './kube/k8.js'; +import {type K8Factory} from './kube/k8_factory.js'; import {patchInject} from './dependency_injection/container_helper.js'; import {type V1Pod} from '@kubernetes/client-node'; import {InjectTokens} from './dependency_injection/inject_tokens.js'; @@ -25,10 +25,10 @@ import {InjectTokens} from './dependency_injection/inject_tokens.js'; export class NetworkNodes { constructor( @inject(InjectTokens.SoloLogger) private readonly logger?: SoloLogger, - @inject(InjectTokens.K8) private readonly k8?: K8, + @inject(InjectTokens.K8Factory) private readonly k8Factory?: K8Factory, ) { this.logger = patchInject(logger, InjectTokens.SoloLogger, this.constructor.name); - this.k8 = patchInject(k8, InjectTokens.K8, this.constructor.name); + this.k8Factory = patchInject(k8Factory, InjectTokens.K8Factory, this.constructor.name); } /** @@ -37,7 +37,7 @@ export class NetworkNodes { * @returns a promise that resolves when the logs are downloaded */ public async getLogs(namespace: NamespaceName) { - const pods: V1Pod[] = await 
this.k8.pods().list(namespace, ['solo.hedera.com/type=network-node']); + const pods: V1Pod[] = await this.k8Factory.default().pods().list(namespace, ['solo.hedera.com/type=network-node']); const timeString = new Date().toISOString().replace(/:/g, '-').replace(/\./g, '-'); @@ -59,9 +59,10 @@ export class NetworkNodes { const containerRef = ContainerRef.of(podRef, ROOT_CONTAINER); const scriptName = 'support-zip.sh'; const sourcePath = path.join(constants.RESOURCES_DIR, scriptName); // script source path - await this.k8.containers().readByRef(containerRef).copyTo(sourcePath, `${HEDERA_HAPI_PATH}`); + await this.k8Factory.default().containers().readByRef(containerRef).copyTo(sourcePath, `${HEDERA_HAPI_PATH}`); await sleep(Duration.ofSeconds(3)); // wait for the script to sync to the file system - await this.k8 + await this.k8Factory + .default() .containers() .readByRef(containerRef) .execContainer([ @@ -69,12 +70,18 @@ export class NetworkNodes { '-c', `sync ${HEDERA_HAPI_PATH} && sudo chown hedera:hedera ${HEDERA_HAPI_PATH}/${scriptName}`, ]); - await this.k8 + await this.k8Factory + .default() .containers() .readByRef(containerRef) .execContainer(['bash', '-c', `sudo chmod 0755 ${HEDERA_HAPI_PATH}/${scriptName}`]); - await this.k8.containers().readByRef(containerRef).execContainer(`${HEDERA_HAPI_PATH}/${scriptName}`); - await this.k8 + await this.k8Factory + .default() + .containers() + .readByRef(containerRef) + .execContainer(`${HEDERA_HAPI_PATH}/${scriptName}`); + await this.k8Factory + .default() .containers() .readByRef(containerRef) .copyFrom(`${HEDERA_HAPI_PATH}/data/${podRef.name}.zip`, targetDir); @@ -93,7 +100,8 @@ export class NetworkNodes { * @returns a promise that resolves when the state files are downloaded */ public async getStatesFromPod(namespace: NamespaceName, nodeAlias: string) { - const pods: V1Pod[] = await this.k8 + const pods: V1Pod[] = await this.k8Factory + .default() .pods() .list(namespace, [`solo.hedera.com/node-name=${nodeAlias}`, 'solo.hedera.com/type=network-node']); @@ -115,8 +123,9 @@ export class NetworkNodes { } const zipCommand = `tar -czf ${HEDERA_HAPI_PATH}/${podRef.name}-state.zip -C ${HEDERA_HAPI_PATH}/data/saved .`; const containerRef = ContainerRef.of(podRef, ROOT_CONTAINER); - await this.k8.containers().readByRef(containerRef).execContainer(zipCommand); - await this.k8 + await this.k8Factory.default().containers().readByRef(containerRef).execContainer(zipCommand); + await this.k8Factory + .default() .containers() .readByRef(containerRef) .copyFrom(`${HEDERA_HAPI_PATH}/${podRef.name}-state.zip`, targetDir); diff --git a/src/core/platform_installer.ts b/src/core/platform_installer.ts index 958284c39..e558b8049 100644 --- a/src/core/platform_installer.ts +++ b/src/core/platform_installer.ts @@ -7,7 +7,7 @@ import * as path from 'path'; import {IllegalArgumentError, MissingArgumentError, SoloError} from './errors.js'; import * as constants from './constants.js'; import {type ConfigManager} from './config_manager.js'; -import {type K8} from '../core/kube/k8.js'; +import {type K8Factory} from '../core/kube/k8_factory.js'; import {Templates} from './templates.js'; import {Flags as flags} from '../commands/flags.js'; import * as Base64 from 'js-base64'; @@ -30,11 +30,11 @@ import {InjectTokens} from './dependency_injection/inject_tokens.js'; export class PlatformInstaller { constructor( @inject(InjectTokens.SoloLogger) private logger?: SoloLogger, - @inject(InjectTokens.K8) private k8?: K8, + @inject(InjectTokens.K8Factory) private k8Factory?: 
K8Factory, @inject(InjectTokens.ConfigManager) private configManager?: ConfigManager, ) { this.logger = patchInject(logger, InjectTokens.SoloLogger, this.constructor.name); - this.k8 = patchInject(k8, InjectTokens.K8, this.constructor.name); + this.k8Factory = patchInject(k8Factory, InjectTokens.K8Factory, this.constructor.name); this.configManager = patchInject(configManager, InjectTokens.ConfigManager, this.constructor.name); } @@ -98,8 +98,8 @@ export class PlatformInstaller { const extractScript = path.join(constants.HEDERA_USER_HOME_DIR, scriptName); // inside the container const containerRef = ContainerRef.of(podRef, constants.ROOT_CONTAINER); - await this.k8.containers().readByRef(containerRef).execContainer(`chmod +x ${extractScript}`); - await this.k8.containers().readByRef(containerRef).execContainer([extractScript, tag]); + await this.k8Factory.default().containers().readByRef(containerRef).execContainer(`chmod +x ${extractScript}`); + await this.k8Factory.default().containers().readByRef(containerRef).execContainer([extractScript, tag]); return true; } catch (e: Error | any) { const message = `failed to extract platform code in this pod '${podRef.name}': ${e.message}`; @@ -127,12 +127,12 @@ export class PlatformInstaller { throw new SoloError(`file does not exist: ${srcPath}`); } - if (!(await this.k8.containers().readByRef(containerRef).hasDir(destDir))) { - await this.k8.containers().readByRef(containerRef).mkdir(destDir); + if (!(await this.k8Factory.default().containers().readByRef(containerRef).hasDir(destDir))) { + await this.k8Factory.default().containers().readByRef(containerRef).mkdir(destDir); } this.logger.debug(`Copying file into ${podRef.name}: ${srcPath} -> ${destDir}`); - await this.k8.containers().readByRef(containerRef).copyTo(srcPath, destDir); + await this.k8Factory.default().containers().readByRef(containerRef).copyTo(srcPath, destDir); const fileName = path.basename(srcPath); copiedFiles.push(path.join(destDir, fileName)); @@ -168,7 +168,8 @@ export class PlatformInstaller { data[fileName] = Base64.encode(fileContents); } - const secretCreated = await this.k8 + const secretCreated = await this.k8Factory + .default() .secrets() .createOrReplace( this._getNamespace(), @@ -213,7 +214,8 @@ export class PlatformInstaller { } } - const secretCreated = await this.k8 + const secretCreated = await this.k8Factory + .default() .secrets() .createOrReplace(this._getNamespace(), 'network-node-hapi-app-secrets', SecretType.OPAQUE, data, undefined); @@ -238,11 +240,13 @@ export class PlatformInstaller { const containerRef = ContainerRef.of(podRef, container); const recursiveFlag = recursive ? 
'-R' : ''; - await this.k8 + await this.k8Factory + .default() .containers() .readByRef(containerRef) .execContainer(['bash', '-c', `chown ${recursiveFlag} hedera:hedera ${destPath} 2>/dev/null || true`]); - await this.k8 + await this.k8Factory + .default() .containers() .readByRef(containerRef) .execContainer(['bash', '-c', `chmod ${recursiveFlag} ${mode} ${destPath} 2>/dev/null || true`]); diff --git a/src/index.ts b/src/index.ts index ad0139a27..7c8f104ad 100644 --- a/src/index.ts +++ b/src/index.ts @@ -27,7 +27,7 @@ import {type CertificateManager} from './core/certificate_manager.js'; import {type LocalConfig} from './core/config/local_config.js'; import {type RemoteConfigManager} from './core/config/remote/remote_config_manager.js'; import * as helpers from './core/helpers.js'; -import {type K8} from './core/kube/k8.js'; +import {type K8Factory} from './core/kube/k8_factory.js'; import {CustomProcessOutput} from './core/process_output.js'; import {type Opts} from './types/command_types.js'; import {type SoloLogger} from './core/logging.js'; @@ -54,7 +54,7 @@ export function main(argv: any) { const helm: Helm = container.resolve(InjectTokens.Helm); const chartManager: ChartManager = container.resolve(InjectTokens.ChartManager); const configManager: ConfigManager = container.resolve(InjectTokens.ConfigManager); - const k8: K8 = container.resolve(InjectTokens.K8); + const k8Factory: K8Factory = container.resolve(InjectTokens.K8Factory); const accountManager: AccountManager = container.resolve(InjectTokens.AccountManager); const platformInstaller: PlatformInstaller = container.resolve(InjectTokens.PlatformInstaller); const keyManager: KeyManager = container.resolve(InjectTokens.KeyManager); @@ -66,14 +66,14 @@ export function main(argv: any) { // set cluster and namespace in the global configManager from kubernetes context // so that we don't need to prompt the user - const contextNamespace: NamespaceName = k8.contexts().readCurrentNamespace(); - const currentClusterName: string = k8.clusters().readCurrent(); - const contextName: string = k8.contexts().readCurrent(); + const contextNamespace: NamespaceName = k8Factory.default().contexts().readCurrentNamespace(); + const currentClusterName: string = k8Factory.default().clusters().readCurrent(); + const contextName: string = k8Factory.default().contexts().readCurrent(); const opts: Opts = { logger, helm, - k8, + k8Factory, downloader, platformInstaller, chartManager, diff --git a/src/types/command_types.ts b/src/types/command_types.ts index c946f7c71..3ab7bc011 100644 --- a/src/types/command_types.ts +++ b/src/types/command_types.ts @@ -3,7 +3,7 @@ */ import {type SoloLogger} from '../core/logging.js'; import {type Helm} from '../core/helm.js'; -import {type K8} from '../core/kube/k8.js'; +import {type K8Factory} from '../core/kube/k8_factory.js'; import {type PackageDownloader} from '../core/package_downloader.js'; import {type PlatformInstaller} from '../core/platform_installer.js'; import {type ChartManager} from '../core/chart_manager.js'; @@ -20,7 +20,7 @@ import {type RemoteConfigManager} from '../core/config/remote/remote_config_mana export interface Opts { logger: SoloLogger; helm: Helm; - k8: K8; + k8Factory: K8Factory; downloader: PackageDownloader; platformInstaller: PlatformInstaller; chartManager: ChartManager; diff --git a/test/e2e/commands/account.test.ts b/test/e2e/commands/account.test.ts index ab0679af3..3314edebf 100644 --- a/test/e2e/commands/account.test.ts +++ b/test/e2e/commands/account.test.ts @@ -23,7 +23,7 @@ 
import {e2eTestSuite, getDefaultArgv, HEDERA_PLATFORM_VERSION_TAG, TEST_CLUSTER, import {AccountCommand} from '../../../src/commands/account.js'; import {Flags as flags} from '../../../src/commands/flags.js'; import {Duration} from '../../../src/core/time/duration.js'; -import {type K8} from '../../../src/core/kube/k8.js'; +import {type K8Factory} from '../../../src/core/kube/k8_factory.js'; import {type AccountManager} from '../../../src/core/account_manager.js'; import {type ConfigManager} from '../../../src/core/config_manager.js'; import {type NodeCommand} from '../../../src/commands/node/index.js'; @@ -51,7 +51,7 @@ argv[flags.chartDirectory.name] = process.env.SOLO_CHARTS_DIR ?? undefined; e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefined, undefined, true, bootstrapResp => { describe('AccountCommand', async () => { let accountCmd: AccountCommand; - let k8: K8; + let k8Factory: K8Factory; let accountManager: AccountManager; let configManager: ConfigManager; let nodeCmd: NodeCommand; @@ -59,7 +59,7 @@ e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefin before(() => { accountCmd = new AccountCommand(bootstrapResp.opts, testSystemAccounts); bootstrapResp.cmd.accountCmd = accountCmd; - k8 = bootstrapResp.opts.k8; + k8Factory = bootstrapResp.opts.k8Factory; accountManager = bootstrapResp.opts.accountManager; configManager = bootstrapResp.opts.configManager; nodeCmd = bootstrapResp.cmd.nodeCmd; @@ -69,7 +69,7 @@ e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefin this.timeout(Duration.ofMinutes(3).toMillis()); await container.resolve(InjectTokens.NetworkNodes).getLogs(namespace); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); await accountManager.close(); await nodeCmd.close(); }); @@ -77,7 +77,8 @@ e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefin describe('node proxies should be UP', () => { for (const nodeAlias of argv[flags.nodeAliasesUnparsed.name].split(',')) { it(`proxy should be UP: ${nodeAlias} `, async () => { - await k8 + await k8Factory + .default() .pods() .waitForReadyStatus( namespace, diff --git a/test/e2e/commands/cluster.test.ts b/test/e2e/commands/cluster.test.ts index 9418a060c..8535ea2e3 100644 --- a/test/e2e/commands/cluster.test.ts +++ b/test/e2e/commands/cluster.test.ts @@ -46,7 +46,7 @@ describe('ClusterCommand', () => { argv[flags.chartDirectory.name] = process.env.SOLO_CHARTS_DIR ?? 
undefined; const bootstrapResp = bootstrapTestVariables(testName, argv); - const k8 = bootstrapResp.opts.k8; + const k8Factory = bootstrapResp.opts.k8Factory; const configManager = bootstrapResp.opts.configManager; const chartManager = bootstrapResp.opts.chartManager; @@ -55,7 +55,7 @@ describe('ClusterCommand', () => { after(async function () { this.timeout(Duration.ofMinutes(3).toMillis()); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); argv[flags.clusterSetupNamespace.name] = constants.SOLO_SETUP_NAMESPACE.name; configManager.update(argv); await clusterCmd.handlers.setup(argv); // restore solo-cluster-setup for other e2e tests to leverage diff --git a/test/e2e/commands/mirror_node.test.ts b/test/e2e/commands/mirror_node.test.ts index ed5278a12..fda31cbee 100644 --- a/test/e2e/commands/mirror_node.test.ts +++ b/test/e2e/commands/mirror_node.test.ts @@ -50,7 +50,7 @@ argv[flags.pinger.name] = true; argv[flags.enableHederaExplorerTls.name] = true; e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefined, undefined, true, bootstrapResp => { describe('MirrorNodeCommand', async () => { - const k8 = bootstrapResp.opts.k8; + const k8Factory = bootstrapResp.opts.k8Factory; const mirrorNodeCmd = new MirrorNodeCommand(bootstrapResp.opts); const explorerCommand = new ExplorerCommand(bootstrapResp.opts); const downloader = new PackageDownloader(mirrorNodeCmd.logger); @@ -68,7 +68,7 @@ e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefin this.timeout(Duration.ofMinutes(3).toMillis()); await container.resolve(InjectTokens.NetworkNodes).getLogs(namespace); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); await accountManager.close(); bootstrapResp.opts.logger.showUser(`------------------------- END: ${testName} ----------------------------`); @@ -114,10 +114,14 @@ e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefin await accountManager.loadNodeClient(namespace); try { // find hedera explorer pod - const pods: V1Pod[] = await k8.pods().list(namespace, ['app.kubernetes.io/component=hedera-explorer']); + const pods: V1Pod[] = await k8Factory + .default() + .pods() + .list(namespace, ['app.kubernetes.io/component=hedera-explorer']); const explorerPod = pods[0]; - portForwarder = await k8 + portForwarder = await k8Factory + .default() .pods() .readByRef(PodRef.of(namespace, PodName.of(explorerPod.metadata.name))) .portForward(8_080, 8_080); @@ -204,7 +208,7 @@ e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefin } await sleep(Duration.ofSeconds(1)); expect(receivedMessage).to.equal(testMessage); - await k8.pods().readByRef(null).stopPortForward(portForwarder); + await k8Factory.default().pods().readByRef(null).stopPortForward(portForwarder); } catch (e) { mirrorNodeCmd.logger.showUserError(e); expect.fail(); diff --git a/test/e2e/commands/network.test.ts b/test/e2e/commands/network.test.ts index c7b12d8d0..70f88d8a7 100644 --- a/test/e2e/commands/network.test.ts +++ b/test/e2e/commands/network.test.ts @@ -41,7 +41,7 @@ describe('NetworkCommand', () => { argv[flags.quiet.name] = true; const bootstrapResp = bootstrapTestVariables(testName, argv); - const k8 = bootstrapResp.opts.k8; + const k8Factory = bootstrapResp.opts.k8Factory; const accountManager = bootstrapResp.opts.accountManager; const configManager = bootstrapResp.opts.configManager; @@ -54,7 +54,7 @@ describe('NetworkCommand', () => 
{ this.timeout(Duration.ofMinutes(3).toMillis()); await container.resolve(InjectTokens.NetworkNodes).getLogs(namespace); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); await accountManager.close(); }); @@ -75,10 +75,13 @@ describe('NetworkCommand', () => { // check pod names should match expected values await expect( - k8.pods().read(PodRef.of(namespace, PodName.of('network-node1-0'))), + k8Factory + .default() + .pods() + .read(PodRef.of(namespace, PodName.of('network-node1-0'))), ).eventually.to.have.nested.property('metadata.name', 'network-node1-0'); // get list of pvc using k8 pvcs list function and print to log - const pvcs = await k8.pvcs().list(namespace, []); + const pvcs = await k8Factory.default().pvcs().list(namespace, []); networkCmd.logger.showList('PVCs', pvcs); expect(networkCmd.getUnusedConfigs(NetworkCommand.DEPLOY_CONFIGS_NAME)).to.deep.equal([ @@ -125,12 +128,12 @@ describe('NetworkCommand', () => { const destroyResult = await networkCmd.destroy(argv); expect(destroyResult).to.be.true; - while ((await k8.pods().list(namespace, ['solo.hedera.com/type=network-node'])).length > 0) { + while ((await k8Factory.default().pods().list(namespace, ['solo.hedera.com/type=network-node'])).length > 0) { networkCmd.logger.debug('Pods are still running. Waiting...'); await sleep(Duration.ofSeconds(3)); } - while ((await k8.pods().list(namespace, ['app=minio'])).length > 0) { + while ((await k8Factory.default().pods().list(namespace, ['app=minio'])).length > 0) { networkCmd.logger.showUser('Waiting for minio container to be deleted...'); await sleep(Duration.ofSeconds(3)); } @@ -143,10 +146,10 @@ describe('NetworkCommand', () => { expect(chartInstalledStatus).to.be.false; // check if pvc are deleted - await expect(k8.pvcs().list(namespace, [])).eventually.to.have.lengthOf(0); + await expect(k8Factory.default().pvcs().list(namespace, [])).eventually.to.have.lengthOf(0); // check if secrets are deleted - await expect(k8.secrets().list(namespace)).eventually.to.have.lengthOf(0); + await expect(k8Factory.default().secrets().list(namespace)).eventually.to.have.lengthOf(0); } catch (e) { networkCmd.logger.showUserError(e); expect.fail(); diff --git a/test/e2e/commands/node_delete.test.ts b/test/e2e/commands/node_delete.test.ts index ef76212ee..d236b61cb 100644 --- a/test/e2e/commands/node_delete.test.ts +++ b/test/e2e/commands/node_delete.test.ts @@ -54,12 +54,12 @@ e2eTestSuite( describe('Node delete', async () => { const nodeCmd = bootstrapResp.cmd.nodeCmd; const accountCmd = bootstrapResp.cmd.accountCmd; - const k8 = bootstrapResp.opts.k8; + const k8Factory = bootstrapResp.opts.k8Factory; after(async function () { this.timeout(Duration.ofMinutes(10).toMillis()); await container.resolve(InjectTokens.NetworkNodes).getLogs(namespace); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); }); it('should succeed with init command', async () => { @@ -84,10 +84,11 @@ e2eTestSuite( it('config.txt should no longer contain removed node alias name', async () => { // read config.txt file from first node, read config.txt line by line, it should not contain value of nodeAlias - const pods: V1Pod[] = await k8.pods().list(namespace, ['solo.hedera.com/type=network-node']); + const pods: V1Pod[] = await k8Factory.default().pods().list(namespace, ['solo.hedera.com/type=network-node']); const podName: PodName = PodName.of(pods[0].metadata.name); const tmpDir = getTmpDir(); - await k8 + await k8Factory + 
.default() .containers() .readByRef(ContainerRef.of(PodRef.of(namespace, podName), ROOT_CONTAINER)) .copyFrom(`${HEDERA_HAPI_PATH}/config.txt`, tmpDir); diff --git a/test/e2e/commands/node_local_hedera.test.ts b/test/e2e/commands/node_local_hedera.test.ts index 1a25d9a86..4bcf280ea 100644 --- a/test/e2e/commands/node_local_hedera.test.ts +++ b/test/e2e/commands/node_local_hedera.test.ts @@ -7,7 +7,7 @@ import {Flags as flags} from '../../../src/commands/flags.js'; import {e2eTestSuite, getDefaultArgv, TEST_CLUSTER} from '../../test_util.js'; import {sleep} from '../../../src/core/helpers.js'; import {SOLO_LOGS_DIR} from '../../../src/core/constants.js'; -import {type K8} from '../../../src/core/kube/k8.js'; +import {type K8Factory} from '../../../src/core/kube/k8_factory.js'; import path from 'path'; import {expect} from 'chai'; import {AccountBalanceQuery, AccountCreateTransaction, Hbar, HbarUnit, PrivateKey} from '@hashgraph/sdk'; @@ -32,7 +32,7 @@ argv[flags.clusterName.name] = TEST_CLUSTER; argv[flags.chartDirectory.name] = process.env.SOLO_CHARTS_DIR ?? undefined; argv[flags.quiet.name] = true; -let hederaK8: K8; +let k8Factory: K8Factory; console.log('Starting local build for Hedera app'); argv[flags.localBuildPath.name] = 'node1=../hedera-services/hedera-node/data/,../hedera-services/hedera-node/data'; argv[flags.namespace.name] = LOCAL_HEDERA.name; @@ -58,7 +58,7 @@ e2eTestSuite( nodeCmd = bootstrapResp.cmd.nodeCmd; accountCmd = bootstrapResp.cmd.accountCmd; accountManager = bootstrapResp.manager.accountManager; - hederaK8 = bootstrapResp.opts.k8; + k8Factory = bootstrapResp.opts.k8Factory; }); it('save the state and restart the node with saved state', async () => { @@ -105,7 +105,7 @@ e2eTestSuite( it('get the logs and delete the namespace', async () => { await accountManager.close(); await container.resolve(InjectTokens.NetworkNodes).getLogs(LOCAL_HEDERA); - await hederaK8.namespaces().delete(LOCAL_HEDERA); + await k8Factory.default().namespaces().delete(LOCAL_HEDERA); }).timeout(Duration.ofMinutes(10).toMillis()); }); }, diff --git a/test/e2e/commands/node_local_ptt.test.ts b/test/e2e/commands/node_local_ptt.test.ts index e04b9a0c4..4987537a3 100644 --- a/test/e2e/commands/node_local_ptt.test.ts +++ b/test/e2e/commands/node_local_ptt.test.ts @@ -6,7 +6,7 @@ import {describe} from 'mocha'; import {Flags as flags} from '../../../src/commands/flags.js'; import {e2eTestSuite, getDefaultArgv, TEST_CLUSTER} from '../../test_util.js'; import {Duration} from '../../../src/core/time/duration.js'; -import {type K8} from '../../../src/core/kube/k8.js'; +import {type K8Factory} from '../../../src/core/kube/k8_factory.js'; import {LOCAL_HEDERA_PLATFORM_VERSION} from '../../../version.js'; import {NamespaceName} from '../../../src/core/kube/resources/namespace/namespace_name.js'; import {type NetworkNodes} from '../../../src/core/network_nodes.js'; @@ -43,15 +43,15 @@ e2eTestSuite( true, bootstrapResp => { describe('Node for platform app should start successfully', () => { - let pttK8: K8; + let k8Factory: K8Factory; before(() => { - pttK8 = bootstrapResp.opts.k8; + k8Factory = bootstrapResp.opts.k8Factory; }); it('get the logs and delete the namespace', async () => { await container.resolve(InjectTokens.NetworkNodes).getLogs(LOCAL_PTT); - await pttK8.namespaces().delete(LOCAL_PTT); + await k8Factory.default().namespaces().delete(LOCAL_PTT); }).timeout(Duration.ofMinutes(2).toMillis()); }); }, diff --git a/test/e2e/commands/node_update.test.ts b/test/e2e/commands/node_update.test.ts index 
12220d4b5..7a18b4dab 100644 --- a/test/e2e/commands/node_update.test.ts +++ b/test/e2e/commands/node_update.test.ts @@ -63,7 +63,7 @@ e2eTestSuite( describe('Node update', async () => { const nodeCmd = bootstrapResp.cmd.nodeCmd; const accountCmd = bootstrapResp.cmd.accountCmd; - const k8 = bootstrapResp.opts.k8; + const k8Factory = bootstrapResp.opts.k8Factory; let existingServiceMap; let existingNodeIdsPrivateKeysHash; @@ -72,12 +72,16 @@ e2eTestSuite( await container.resolve(InjectTokens.NetworkNodes).getLogs(namespace); await nodeCmd.handlers.stop(argv); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); }); it('cache current version of private keys', async () => { existingServiceMap = await bootstrapResp.opts.accountManager.getNodeServiceMap(namespace); - existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(existingServiceMap, k8, getTmpDir()); + existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash( + existingServiceMap, + k8Factory, + getTmpDir(), + ); }).timeout(defaultTimeout); it('should succeed with init command', async () => { @@ -119,7 +123,11 @@ e2eTestSuite( accountCreationShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace, updateNodeId); it('signing key and tls key should not match previous one', async () => { - const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(existingServiceMap, k8, getTmpDir()); + const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash( + existingServiceMap, + k8Factory, + getTmpDir(), + ); for (const [nodeAlias, existingKeyHashMap] of existingNodeIdsPrivateKeysHash.entries()) { const currentNodeKeyHashMap = currentNodeIdsPrivateKeysHash.get(nodeAlias); @@ -143,10 +151,11 @@ e2eTestSuite( it('config.txt should be changed with new account id', async () => { // read config.txt file from first node, read config.txt line by line, it should not contain value of newAccountId - const pods: V1Pod[] = await k8.pods().list(namespace, ['solo.hedera.com/type=network-node']); + const pods: V1Pod[] = await k8Factory.default().pods().list(namespace, ['solo.hedera.com/type=network-node']); const podName: PodName = PodName.of(pods[0].metadata.name); const tmpDir = getTmpDir(); - await k8 + await k8Factory + .default() .containers() .readByRef(ContainerRef.of(PodRef.of(namespace, podName), ROOT_CONTAINER)) .copyFrom(`${HEDERA_HAPI_PATH}/config.txt`, tmpDir); diff --git a/test/e2e/commands/node_upgrade.test.ts b/test/e2e/commands/node_upgrade.test.ts index badf6dcd0..db99373c2 100644 --- a/test/e2e/commands/node_upgrade.test.ts +++ b/test/e2e/commands/node_upgrade.test.ts @@ -49,13 +49,13 @@ e2eTestSuite( describe('Node upgrade', async () => { const nodeCmd = bootstrapResp.cmd.nodeCmd; const accountCmd = bootstrapResp.cmd.accountCmd; - const k8 = bootstrapResp.opts.k8; + const k8Factory = bootstrapResp.opts.k8Factory; after(async function () { this.timeout(Duration.ofMinutes(10).toMillis()); await container.resolve(InjectTokens.NetworkNodes).getLogs(namespace); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); }); it('should succeed with init command', async () => { @@ -90,9 +90,10 @@ e2eTestSuite( it('network nodes version file was upgraded', async () => { // copy the version.txt file from the pod data/upgrade/current directory const tmpDir = getTmpDir(); - const pods: V1Pod[] = await k8.pods().list(namespace, ['solo.hedera.com/type=network-node']); + const pods: V1Pod[] = await 
k8Factory.default().pods().list(namespace, ['solo.hedera.com/type=network-node']); const podName: PodName = PodName.of(pods[0].metadata.name); - await k8 + await k8Factory + .default() .containers() .readByRef(ContainerRef.of(PodRef.of(namespace, podName), ROOT_CONTAINER)) .copyFrom(`${HEDERA_HAPI_PATH}/data/upgrade/current/version.txt`, tmpDir); diff --git a/test/e2e/commands/relay.test.ts b/test/e2e/commands/relay.test.ts index fe964b41b..817de39b0 100644 --- a/test/e2e/commands/relay.test.ts +++ b/test/e2e/commands/relay.test.ts @@ -32,13 +32,13 @@ argv[flags.quiet.name] = true; e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefined, undefined, true, bootstrapResp => { describe('RelayCommand', async () => { - const k8 = bootstrapResp.opts.k8; + const k8Factory = bootstrapResp.opts.k8Factory; const configManager = bootstrapResp.opts.configManager; const relayCmd = new RelayCommand(bootstrapResp.opts); after(async () => { await container.resolve(InjectTokens.NetworkNodes).getLogs(namespace); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); }); afterEach(async () => await sleep(Duration.ofMillis(5))); diff --git a/test/e2e/commands/separate_node_add.test.ts b/test/e2e/commands/separate_node_add.test.ts index 80f5574c4..f6989f9c2 100644 --- a/test/e2e/commands/separate_node_add.test.ts +++ b/test/e2e/commands/separate_node_add.test.ts @@ -61,7 +61,7 @@ e2eTestSuite( const nodeCmd = bootstrapResp.cmd.nodeCmd; const accountCmd = bootstrapResp.cmd.accountCmd; const networkCmd = bootstrapResp.cmd.networkCmd; - const k8 = bootstrapResp.opts.k8; + const k8Factory = bootstrapResp.opts.k8Factory; let existingServiceMap; let existingNodeIdsPrivateKeysHash; @@ -73,13 +73,17 @@ e2eTestSuite( await nodeCmd.accountManager.close(); await nodeCmd.handlers.stop(argv); await networkCmd.destroy(argv); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); }); it('cache current version of private keys', async () => { // @ts-ignore existingServiceMap = await nodeCmd.accountManager.getNodeServiceMap(namespace); - existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(existingServiceMap, k8, getTmpDir()); + existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash( + existingServiceMap, + k8Factory, + getTmpDir(), + ); }).timeout(defaultTimeout); it('should succeed with init command', async () => { @@ -110,7 +114,11 @@ e2eTestSuite( accountCreationShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace); it('existing nodes private keys should not have changed', async () => { - const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(existingServiceMap, k8, getTmpDir()); + const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash( + existingServiceMap, + k8Factory, + getTmpDir(), + ); for (const [nodeAlias, existingKeyHashMap] of existingNodeIdsPrivateKeysHash.entries()) { const currentNodeKeyHashMap = currentNodeIdsPrivateKeysHash.get(nodeAlias); diff --git a/test/e2e/commands/separate_node_delete.test.ts b/test/e2e/commands/separate_node_delete.test.ts index 6e732d186..ba244890e 100644 --- a/test/e2e/commands/separate_node_delete.test.ts +++ b/test/e2e/commands/separate_node_delete.test.ts @@ -62,13 +62,13 @@ e2eTestSuite( describe('Node delete via separated commands', async () => { const nodeCmd = bootstrapResp.cmd.nodeCmd; const accountCmd = bootstrapResp.cmd.accountCmd; - const k8 = bootstrapResp.opts.k8; + 
const k8Factory = bootstrapResp.opts.k8Factory; after(async function () { this.timeout(Duration.ofMinutes(10).toMillis()); await container.resolve(InjectTokens.NetworkNodes).getLogs(namespace); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); }); it('should succeed with init command', async () => { @@ -97,12 +97,16 @@ e2eTestSuite( it('config.txt should no longer contain removed nodeAlias', async () => { // read config.txt file from first node, read config.txt line by line, it should not contain value of nodeAlias - const pods: V1Pod[] = await k8.pods().list(namespace, ['solo.hedera.com/type=network-node']); + const pods: V1Pod[] = await k8Factory.default().pods().list(namespace, ['solo.hedera.com/type=network-node']); const podName: PodName = PodName.of(pods[0].metadata.name); const podRef: PodRef = PodRef.of(namespace, podName); const containerRef: ContainerRef = ContainerRef.of(podRef, ROOT_CONTAINER); const tmpDir: string = getTmpDir(); - await k8.containers().readByRef(containerRef).copyFrom(`${HEDERA_HAPI_PATH}/config.txt`, tmpDir); + await k8Factory + .default() + .containers() + .readByRef(containerRef) + .copyFrom(`${HEDERA_HAPI_PATH}/config.txt`, tmpDir); const configTxt: string = fs.readFileSync(`${tmpDir}/config.txt`, 'utf8'); console.log('config.txt:', configTxt); expect(configTxt).not.to.contain(nodeAlias); diff --git a/test/e2e/commands/separate_node_update.test.ts b/test/e2e/commands/separate_node_update.test.ts index 8ad513b23..2177de5cc 100644 --- a/test/e2e/commands/separate_node_update.test.ts +++ b/test/e2e/commands/separate_node_update.test.ts @@ -63,7 +63,7 @@ e2eTestSuite( describe('Node update via separated commands', async () => { const nodeCmd = bootstrapResp.cmd.nodeCmd; const accountCmd = bootstrapResp.cmd.accountCmd; - const k8 = bootstrapResp.opts.k8; + const k8Factory = bootstrapResp.opts.k8Factory; let existingServiceMap; let existingNodeIdsPrivateKeysHash; @@ -72,12 +72,16 @@ e2eTestSuite( await container.resolve(InjectTokens.NetworkNodes).getLogs(namespace); await nodeCmd.handlers.stop(argv); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); }); it('cache current version of private keys', async () => { existingServiceMap = await bootstrapResp.opts.accountManager.getNodeServiceMap(namespace); - existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(existingServiceMap, k8, getTmpDir()); + existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash( + existingServiceMap, + k8Factory, + getTmpDir(), + ); }).timeout(Duration.ofMinutes(8).toMillis()); it('should succeed with init command', async () => { @@ -130,7 +134,11 @@ e2eTestSuite( accountCreationShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace, updateNodeId); it('signing key and tls key should not match previous one', async () => { - const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(existingServiceMap, k8, getTmpDir()); + const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash( + existingServiceMap, + k8Factory, + getTmpDir(), + ); for (const [nodeAlias, existingKeyHashMap] of existingNodeIdsPrivateKeysHash.entries()) { const currentNodeKeyHashMap = currentNodeIdsPrivateKeysHash.get(nodeAlias); @@ -154,12 +162,16 @@ e2eTestSuite( it('config.txt should be changed with new account id', async () => { // read config.txt file from first node, read config.txt line by line, it should not contain value of newAccountId - 
const pods: V1Pod[] = await k8.pods().list(namespace, ['solo.hedera.com/type=network-node']); + const pods: V1Pod[] = await k8Factory.default().pods().list(namespace, ['solo.hedera.com/type=network-node']); const podName: PodName = PodName.of(pods[0].metadata.name); const podRef: PodRef = PodRef.of(namespace, podName); const containerRef: ContainerRef = ContainerRef.of(podRef, ROOT_CONTAINER); const tmpDir: string = getTmpDir(); - await k8.containers().readByRef(containerRef).copyFrom(`${HEDERA_HAPI_PATH}/config.txt`, tmpDir); + await k8Factory + .default() + .containers() + .readByRef(containerRef) + .copyFrom(`${HEDERA_HAPI_PATH}/config.txt`, tmpDir); const configTxt: string = fs.readFileSync(`${tmpDir}/config.txt`, 'utf8'); console.log('config.txt:', configTxt); diff --git a/test/e2e/commands/separate_node_upgrade.test.ts b/test/e2e/commands/separate_node_upgrade.test.ts index 9d4ccedb1..f1b4eb63c 100644 --- a/test/e2e/commands/separate_node_upgrade.test.ts +++ b/test/e2e/commands/separate_node_upgrade.test.ts @@ -49,13 +49,13 @@ e2eTestSuite( describe('Node upgrade', async () => { const nodeCmd = bootstrapResp.cmd.nodeCmd; const accountCmd = bootstrapResp.cmd.accountCmd; - const k8 = bootstrapResp.opts.k8; + const k8Factory = bootstrapResp.opts.k8Factory; after(async function () { this.timeout(Duration.ofMinutes(10).toMillis()); await container.resolve(InjectTokens.NetworkNodes).getLogs(namespace); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); }); it('should succeed with init command', async () => { @@ -96,11 +96,12 @@ e2eTestSuite( it('network nodes version file was upgraded', async () => { // copy the version.txt file from the pod data/upgrade/current directory const tmpDir: string = getTmpDir(); - const pods: V1Pod[] = await k8.pods().list(namespace, ['solo.hedera.com/type=network-node']); + const pods: V1Pod[] = await k8Factory.default().pods().list(namespace, ['solo.hedera.com/type=network-node']); const podName: PodName = PodName.of(pods[0].metadata.name); const podRef: PodRef = PodRef.of(namespace, podName); const containerRef: ContainerRef = ContainerRef.of(podRef, ROOT_CONTAINER); - await k8 + await k8Factory + .default() .containers() .readByRef(containerRef) .copyFrom(`${HEDERA_HAPI_PATH}/data/upgrade/current/version.txt`, tmpDir); diff --git a/test/e2e/e2e_node_util.ts b/test/e2e/e2e_node_util.ts index 9f1765d26..266998db0 100644 --- a/test/e2e/e2e_node_util.ts +++ b/test/e2e/e2e_node_util.ts @@ -18,7 +18,7 @@ import * as NodeCommandConfigs from '../../src/commands/node/configs.js'; import {type NodeAlias} from '../../src/types/aliases.js'; import {type ListrTaskWrapper} from 'listr2'; import {type ConfigManager} from '../../src/core/config_manager.js'; -import {type K8} from '../../src/core/kube/k8.js'; +import {type K8Factory} from '../../src/core/kube/k8_factory.js'; import {type NodeCommand} from '../../src/commands/node/index.js'; import {Duration} from '../../src/core/time/duration.js'; import {container} from 'tsyringe-neo'; @@ -58,7 +58,7 @@ export function e2eNodeKeyRefreshTest(testName: string, mode: string, releaseTag describe(`NodeCommand [testName ${testName}, mode ${mode}, release ${releaseTag}]`, async () => { const accountManager = bootstrapResp.opts.accountManager; - const k8 = bootstrapResp.opts.k8; + const k8Factory = bootstrapResp.opts.k8Factory; const nodeCmd = bootstrapResp.cmd.nodeCmd; afterEach(async function () { @@ -72,7 +72,7 @@ export function e2eNodeKeyRefreshTest(testName: string, mode: 
string, releaseTag this.timeout(Duration.ofMinutes(10).toMillis()); await container.resolve(InjectTokens.NetworkNodes).getLogs(namespace); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); }); describe(`Node should have started successfully [mode ${mode}, release ${releaseTag}]`, () => { @@ -83,7 +83,10 @@ export function e2eNodeKeyRefreshTest(testName: string, mode: string, releaseTag it(`Node Proxy should be UP [mode ${mode}, release ${releaseTag}`, async () => { try { const labels = ['app=haproxy-node1', 'solo.hedera.com/type=haproxy']; - const readyPods: V1Pod[] = await k8.pods().waitForReadyStatus(namespace, labels, 300, 1000); + const readyPods: V1Pod[] = await k8Factory + .default() + .pods() + .waitForReadyStatus(namespace, labels, 300, 1000); expect(readyPods).to.not.be.null; expect(readyPods).to.not.be.undefined; expect(readyPods.length).to.be.greaterThan(0); @@ -102,9 +105,9 @@ export function e2eNodeKeyRefreshTest(testName: string, mode: string, releaseTag before(async function () { this.timeout(Duration.ofMinutes(2).toMillis()); - const podName = await nodeRefreshTestSetup(argv, testName, k8, nodeAlias); + const podName = await nodeRefreshTestSetup(argv, testName, k8Factory, nodeAlias); if (mode === 'kill') { - await k8.pods().readByRef(PodRef.of(namespace, podName)).killPod(); + await k8Factory.default().pods().readByRef(PodRef.of(namespace, podName)).killPod(); } else if (mode === 'stop') { expect(await nodeCmd.handlers.stop(argv)).to.be.true; await sleep(Duration.ofSeconds(20)); // give time for node to stop and update its logs @@ -181,12 +184,18 @@ export function e2eNodeKeyRefreshTest(testName: string, mode: string, releaseTag }).timeout(defaultTimeout); } - async function nodeRefreshTestSetup(argv: Record, testName: string, k8: K8, nodeAliases: string) { + async function nodeRefreshTestSetup( + argv: Record, + testName: string, + k8Factory: K8Factory, + nodeAliases: string, + ) { argv[flags.nodeAliasesUnparsed.name] = nodeAliases; const configManager: ConfigManager = container.resolve(InjectTokens.ConfigManager); configManager.update(argv); - const podArray = await k8 + const podArray = await k8Factory + .default() .pods() .list(configManager.getFlag(flags.namespace), [ `app=network-${nodeAliases}`, diff --git a/test/e2e/integration/commands/init.test.ts b/test/e2e/integration/commands/init.test.ts index ba65c1416..d120d04ca 100644 --- a/test/e2e/integration/commands/init.test.ts +++ b/test/e2e/integration/commands/init.test.ts @@ -9,7 +9,7 @@ import {type DependencyManager} from '../../../../src/core/dependency_managers/i import {type Helm} from '../../../../src/core/helm.js'; import {type ChartManager} from '../../../../src/core/chart_manager.js'; import {type ConfigManager} from '../../../../src/core/config_manager.js'; -import {type K8} from '../../../../src/core/kube/k8.js'; +import {type K8Factory} from '../../../../src/core/kube/k8_factory.js'; import {K8Client} from '../../../../src/core/kube/k8_client/k8_client.js'; import {LocalConfig} from '../../../../src/core/config/local_config.js'; import {type KeyManager} from '../../../../src/core/key_manager.js'; @@ -30,7 +30,7 @@ describe('InitCommand', () => { const chartManager: ChartManager = container.resolve(InjectTokens.ChartManager); const configManager: ConfigManager = container.resolve(InjectTokens.ConfigManager); - let k8: K8; + let k8Factory: K8Factory; let localConfig: LocalConfig; const keyManager: KeyManager = container.resolve(InjectTokens.KeyManager); 
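// Editor's note: a minimal sketch of the refactoring pattern this patch applies throughout —
// resolve the K8Factory (instead of a K8 client) and call .default() before touching any
// resource group. The interface shape is inferred from the call sites in this diff; the real
// declaration lives in src/core/kube/k8_factory.js, and the import paths below are illustrative.
import {container} from 'tsyringe-neo';
import {type K8Factory} from '../../src/core/kube/k8_factory.js';
import {InjectTokens} from '../../src/core/dependency_injection/inject_tokens.js';
import {NamespaceName} from '../../src/core/kube/resources/namespace/namespace_name.js';

const k8Factory: K8Factory = container.resolve(InjectTokens.K8Factory);
const ns = NamespaceName.of('example-namespace'); // hypothetical namespace, for illustration only

// before: if (!(await k8.namespaces().has(ns))) { ... }
if (!(await k8Factory.default().namespaces().has(ns))) {
  await k8Factory.default().namespaces().create(ns);
}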
@@ -44,7 +44,7 @@ describe('InitCommand', () => { before(() => { sandbox = sinon.createSandbox(); sandbox.stub(K8Client.prototype, 'init').callsFake(() => this); - k8 = container.resolve(InjectTokens.K8); + k8Factory = container.resolve(InjectTokens.K8Factory); localConfig = new LocalConfig(path.join(BASE_TEST_DIR, 'local-config.yaml')); remoteConfigManager = container.resolve(InjectTokens.RemoteConfigManager); leaseManager = container.resolve(InjectTokens.LeaseManager); @@ -53,7 +53,7 @@ describe('InitCommand', () => { initCmd = new InitCommand({ logger: testLogger, helm, - k8, + k8Factory, chartManager, configManager, depManager, diff --git a/test/e2e/integration/core/account_manager.test.ts b/test/e2e/integration/core/account_manager.test.ts index e51fb5a2f..1943ffcc4 100644 --- a/test/e2e/integration/core/account_manager.test.ts +++ b/test/e2e/integration/core/account_manager.test.ts @@ -9,7 +9,7 @@ import {e2eTestSuite, getDefaultArgv, TEST_CLUSTER} from '../../../test_util.js' import * as version from '../../../../version.js'; import {PodName} from '../../../../src/core/kube/resources/pod/pod_name.js'; import {Duration} from '../../../../src/core/time/duration.js'; -import {type K8} from '../../../../src/core/kube/k8.js'; +import {type K8Factory} from '../../../../src/core/kube/k8_factory.js'; import {type AccountManager} from '../../../../src/core/account_manager.js'; import {NamespaceName} from '../../../../src/core/kube/resources/namespace/namespace_name.js'; import {PodRef} from '../../../../src/core/kube/resources/pod/pod_ref.js'; @@ -38,18 +38,18 @@ e2eTestSuite( true, bootstrapResp => { describe('AccountManager', async () => { - let k8: K8; + let k8Factory: K8Factory; let accountManager: AccountManager; before(() => { - k8 = bootstrapResp.opts.k8; + k8Factory = bootstrapResp.opts.k8Factory; accountManager = bootstrapResp.opts.accountManager; }); after(async function () { this.timeout(Duration.ofMinutes(3).toMillis()); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); await accountManager.close(); }); @@ -68,7 +68,9 @@ e2eTestSuite( // ports should be opened // @ts-expect-error - TS2341: Property _portForwards is private and only accessible within class AccountManager - accountManager._portForwards.push(await k8.pods().readByRef(podRef).portForward(localPort, podPort)); + accountManager._portForwards.push( + await k8Factory.default().pods().readByRef(podRef).portForward(localPort, podPort), + ); // ports should be closed await accountManager.close(); diff --git a/test/e2e/integration/core/k8_e2e.test.ts b/test/e2e/integration/core/k8_e2e.test.ts index 36b474b9e..f390394f0 100644 --- a/test/e2e/integration/core/k8_e2e.test.ts +++ b/test/e2e/integration/core/k8_e2e.test.ts @@ -27,12 +27,18 @@ import {ContainerRef} from '../../../../src/core/kube/resources/container/contai import {ServiceRef} from '../../../../src/core/kube/resources/service/service_ref.js'; import {ServiceName} from '../../../../src/core/kube/resources/service/service_name.js'; import {InjectTokens} from '../../../../src/core/dependency_injection/inject_tokens.js'; -import {type K8} from '../../../../src/core/kube/k8.js'; +import {type K8Factory} from '../../../../src/core/kube/k8_factory.js'; const defaultTimeout = Duration.ofMinutes(2).toMillis(); -async function createPod(podRef: PodRef, containerName: ContainerName, podLabelValue: string, k8: K8): Promise { - await k8 +async function createPod( + podRef: PodRef, + containerName: ContainerName, + 
podLabelValue: string, + k8Factory: K8Factory, +): Promise { + await k8Factory + .default() .pods() .create( podRef, @@ -47,7 +53,7 @@ async function createPod(podRef: PodRef, containerName: ContainerName, podLabelV describe('K8', () => { const testLogger = logging.NewLogger('debug', true); const configManager: ConfigManager = container.resolve(InjectTokens.ConfigManager); - const k8: K8 = container.resolve(InjectTokens.K8); + const k8Factory: K8Factory = container.resolve(InjectTokens.K8Factory); const testNamespace = NamespaceName.of('k8-e2e'); const argv = []; const podName = PodName.of(`test-pod-${uuid4()}`); @@ -61,13 +67,13 @@ describe('K8', () => { try { argv[flags.namespace.name] = testNamespace.name; configManager.update(argv); - if (!(await k8.namespaces().has(testNamespace))) { - await k8.namespaces().create(testNamespace); + if (!(await k8Factory.default().namespaces().has(testNamespace))) { + await k8Factory.default().namespaces().create(testNamespace); } - await createPod(podRef, containerName, podLabelValue, k8); + await createPod(podRef, containerName, podLabelValue, k8Factory); const serviceRef: ServiceRef = ServiceRef.of(testNamespace, ServiceName.of(serviceName)); - await k8.services().create(serviceRef, {app: 'svc-test'}, 80, 80); + await k8Factory.default().services().create(serviceRef, {app: 'svc-test'}, 80, 80); } catch (e) { console.log(`${e}, ${e.stack}`); throw e; @@ -77,7 +83,7 @@ describe('K8', () => { after(async function () { this.timeout(defaultTimeout); try { - await k8.pods().readByRef(PodRef.of(testNamespace, podName)).killPod(); + await k8Factory.default().pods().readByRef(PodRef.of(testNamespace, podName)).killPod(); argv[flags.namespace.name] = constants.SOLO_SETUP_NAMESPACE.name; configManager.update(argv); } catch (e) { @@ -87,47 +93,54 @@ describe('K8', () => { }); it('should be able to list clusters', async () => { - const clusters = k8.clusters().list(); + const clusters = k8Factory.default().clusters().list(); expect(clusters).not.to.have.lengthOf(0); }).timeout(defaultTimeout); it('should be able to list namespaces', async () => { - const namespaces = await k8.namespaces().list(); + const namespaces = await k8Factory.default().namespaces().list(); expect(namespaces).not.to.have.lengthOf(0); const match = namespaces.filter(n => n.name === constants.DEFAULT_NAMESPACE.name); expect(match).to.have.lengthOf(1); }).timeout(defaultTimeout); it('should be able to list context names', () => { - const contexts = k8.contexts().list(); + const contexts = k8Factory.default().contexts().list(); expect(contexts).not.to.have.lengthOf(0); }).timeout(defaultTimeout); it('should be able to create and delete a namespaces', async () => { const name = uuid4(); - expect(await k8.namespaces().create(NamespaceName.of(name))).to.be.true; - expect(await k8.namespaces().delete(NamespaceName.of(name))).to.be.true; + expect(await k8Factory.default().namespaces().create(NamespaceName.of(name))).to.be.true; + expect(await k8Factory.default().namespaces().delete(NamespaceName.of(name))).to.be.true; }).timeout(defaultTimeout); it('should be able to run wait for pod', async () => { const labels = [`app=${podLabelValue}`]; - const pods = await k8.pods().waitForRunningPhase(testNamespace, labels, 30, constants.PODS_RUNNING_DELAY); + const pods = await k8Factory + .default() + .pods() + .waitForRunningPhase(testNamespace, labels, 30, constants.PODS_RUNNING_DELAY); expect(pods).to.have.lengthOf(1); }).timeout(defaultTimeout); it('should be able to run wait for pod ready', async () => 
{ const labels = [`app=${podLabelValue}`]; - const pods = await k8.pods().waitForReadyStatus(testNamespace, labels, 100); + const pods = await k8Factory.default().pods().waitForReadyStatus(testNamespace, labels, 100); expect(pods).to.have.lengthOf(1); }).timeout(defaultTimeout); it('should be able to check if a path is directory inside a container', async () => { - const pods = await k8.pods().list(testNamespace, [`app=${podLabelValue}`]); + const pods = await k8Factory + .default() + .pods() + .list(testNamespace, [`app=${podLabelValue}`]); const podName = PodName.of(pods[0].metadata.name); expect( - await k8 + await k8Factory + .default() .containers() .readByRef(ContainerRef.of(PodRef.of(testNamespace, podName), containerName)) .hasDir('/tmp'), @@ -138,7 +151,10 @@ describe('K8', () => { each(testCases).describe('test copyTo and copyFrom', localFilePath => { it('should be able to copy a file to and from a container', async () => { - const pods = await k8.pods().waitForReadyStatus(testNamespace, [`app=${podLabelValue}`], 20); + const pods = await k8Factory + .default() + .pods() + .waitForReadyStatus(testNamespace, [`app=${podLabelValue}`], 20); expect(pods).to.have.lengthOf(1); const localTmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'k8-test')); @@ -151,12 +167,20 @@ describe('K8', () => { // upload the file expect( - await k8.containers().readByRef(ContainerRef.of(podRef, containerName)).copyTo(localFilePath, remoteTmpDir), + await k8Factory + .default() + .containers() + .readByRef(ContainerRef.of(podRef, containerName)) + .copyTo(localFilePath, remoteTmpDir), ).to.be.true; // download the same file expect( - await k8.containers().readByRef(ContainerRef.of(podRef, containerName)).copyFrom(remoteFilePath, localTmpDir), + await k8Factory + .default() + .containers() + .readByRef(ContainerRef.of(podRef, containerName)) + .copyFrom(remoteFilePath, localTmpDir), ).to.be.true; const downloadedFilePath = path.join(localTmpDir, fileName); const downloadedFileData = fs.readFileSync(downloadedFilePath); @@ -167,7 +191,8 @@ describe('K8', () => { expect(downloadedFileHash, 'downloaded file hash should match original file hash').to.equal(originalFileHash); // rm file inside the container - await k8 + await k8Factory + .default() .containers() .readByRef(ContainerRef.of(podRef, containerName)) .execContainer(['rm', '-f', remoteFilePath]); @@ -181,7 +206,9 @@ describe('K8', () => { const localPort = +constants.HEDERA_NODE_INTERNAL_GOSSIP_PORT; try { const podRef: PodRef = PodRef.of(testNamespace, podName); - k8.pods() + k8Factory + .default() + .pods() .readByRef(podRef) .portForward(localPort, +constants.HEDERA_NODE_INTERNAL_GOSSIP_PORT) .then(server => { @@ -191,13 +218,13 @@ describe('K8', () => { const s = new net.Socket(); s.on('ready', async () => { s.destroy(); - await k8.pods().readByRef(podRef).stopPortForward(server); + await k8Factory.default().pods().readByRef(podRef).stopPortForward(server); done(); }); s.on('error', async e => { s.destroy(); - await k8.pods().readByRef(podRef).stopPortForward(server); + await k8Factory.default().pods().readByRef(podRef).stopPortForward(server); done(new SoloError(`could not connect to local port '${localPort}': ${e.message}`, e)); }); @@ -211,9 +238,13 @@ describe('K8', () => { }).timeout(defaultTimeout); it('should be able to cat a file inside the container', async () => { - const pods = await k8.pods().list(testNamespace, [`app=${podLabelValue}`]); + const pods = await k8Factory + .default() + .pods() + .list(testNamespace, 
[`app=${podLabelValue}`]); const podName = PodName.of(pods[0].metadata.name); - const output = await k8 + const output = await k8Factory + .default() .containers() .readByRef(ContainerRef.of(PodRef.of(testNamespace, podName), containerName)) .execContainer(['cat', '/etc/hostname']); @@ -223,14 +254,14 @@ describe('K8', () => { it('should be able to list persistent volume claims', async () => { const pvcRef: PodRef = PodRef.of(testNamespace, PodName.of(`test-pvc-${uuid4()}`)); try { - await k8.pvcs().create(pvcRef, {storage: '50Mi'}, ['ReadWriteOnce']); - const pvcs: string[] = await k8.pvcs().list(testNamespace, undefined); + await k8Factory.default().pvcs().create(pvcRef, {storage: '50Mi'}, ['ReadWriteOnce']); + const pvcs: string[] = await k8Factory.default().pvcs().list(testNamespace, undefined); expect(pvcs).to.have.length.greaterThan(0); } catch (e) { console.error(e); throw e; } finally { - await k8.pvcs().delete(pvcRef); + await k8Factory.default().pvcs().delete(pvcRef); } }).timeout(defaultTimeout); @@ -238,9 +269,12 @@ describe('K8', () => { const podName = PodName.of(`test-pod-${uuid4()}`); const podRef = PodRef.of(testNamespace, podName); const podLabelValue = `test-${uuid4()}`; - await createPod(podRef, containerName, podLabelValue, k8); - await k8.pods().readByRef(podRef).killPod(); - const newPods = await k8.pods().list(testNamespace, [`app=${podLabelValue}`]); + await createPod(podRef, containerName, podLabelValue, k8Factory); + await k8Factory.default().pods().readByRef(podRef).killPod(); + const newPods = await k8Factory + .default() + .pods() + .list(testNamespace, [`app=${podLabelValue}`]); expect(newPods).to.have.lengthOf(0); }); }); diff --git a/test/e2e/integration/core/lease.test.ts b/test/e2e/integration/core/lease.test.ts index 1bb90e711..6cb9f160d 100644 --- a/test/e2e/integration/core/lease.test.ts +++ b/test/e2e/integration/core/lease.test.ts @@ -4,7 +4,7 @@ import {it, describe, before, after} from 'mocha'; import {type ConfigManager} from '../../../../src/core/config_manager.js'; import * as logging from '../../../../src/core/logging.js'; -import {type K8} from '../../../../src/core/kube/k8.js'; +import {type K8Factory} from '../../../../src/core/kube/k8_factory.js'; import {expect} from 'chai'; import {IntervalLease} from '../../../../src/core/lease/interval_lease.js'; import {LeaseHolder} from '../../../../src/core/lease/lease_holder.js'; @@ -22,30 +22,37 @@ const leaseDuration = 4; describe('Lease', async () => { const testLogger = logging.NewLogger('debug', true); const configManager: ConfigManager = container.resolve(InjectTokens.ConfigManager); - const k8: K8 = container.resolve(InjectTokens.K8); + const k8Factory: K8Factory = container.resolve(InjectTokens.K8Factory); const testNamespace = NamespaceName.of('lease-e2e'); const renewalService = new NoopLeaseRenewalService(); before(async function () { this.timeout(defaultTimeout); - if (await k8.namespaces().has(testNamespace)) { - await k8.namespaces().delete(testNamespace); + if (await k8Factory.default().namespaces().has(testNamespace)) { + await k8Factory.default().namespaces().delete(testNamespace); await sleep(Duration.ofSeconds(5)); } - await k8.namespaces().create(testNamespace); + await k8Factory.default().namespaces().create(testNamespace); }); after(async function () { this.timeout(defaultTimeout); - await k8.namespaces().delete(testNamespace); + await k8Factory.default().namespaces().delete(testNamespace); }); describe('acquire and release', async function () { this.timeout(defaultTimeout); 
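// Editor's note: IntervalLease now takes the K8Factory as its first constructor argument in
// place of the K8 client. A hedged usage sketch matching the lease tests in this hunk
// (renewalService, testNamespace and leaseDuration are the same locals those tests declare;
// the fifth argument is passed as null exactly as they do):
const lease = new IntervalLease(
  k8Factory, // was: k8
  renewalService,
  LeaseHolder.default(),
  testNamespace,
  null,
  leaseDuration,
);
await lease.acquire();
expect(await lease.isAcquired()).to.be.true;
await lease.release();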
it('non-expired lease', async () => { - const lease = new IntervalLease(k8, renewalService, LeaseHolder.default(), testNamespace, null, leaseDuration); + const lease = new IntervalLease( + k8Factory, + renewalService, + LeaseHolder.default(), + testNamespace, + null, + leaseDuration, + ); await lease.acquire(); expect(await lease.isAcquired()).to.be.true; @@ -55,9 +62,16 @@ describe('Lease', async () => { }); it('non-expired lease held by another user should not be released', async () => { - const lease = new IntervalLease(k8, renewalService, LeaseHolder.default(), testNamespace, null, leaseDuration); + const lease = new IntervalLease( + k8Factory, + renewalService, + LeaseHolder.default(), + testNamespace, + null, + leaseDuration, + ); const newLease = new IntervalLease( - k8, + k8Factory, renewalService, LeaseHolder.of('other'), testNamespace, @@ -79,9 +93,16 @@ describe('Lease', async () => { }); it('expired lease held by another user should be released', async () => { - const lease = new IntervalLease(k8, renewalService, LeaseHolder.default(), testNamespace, null, leaseDuration); + const lease = new IntervalLease( + k8Factory, + renewalService, + LeaseHolder.default(), + testNamespace, + null, + leaseDuration, + ); const newLease = new IntervalLease( - k8, + k8Factory, renewalService, LeaseHolder.of('other'), testNamespace, @@ -103,7 +124,14 @@ describe('Lease', async () => { }); it('expired lease should be released', async () => { - const lease = new IntervalLease(k8, renewalService, LeaseHolder.default(), testNamespace, null, leaseDuration); + const lease = new IntervalLease( + k8Factory, + renewalService, + LeaseHolder.default(), + testNamespace, + null, + leaseDuration, + ); await lease.acquire(); expect(await lease.isAcquired()).to.be.true; @@ -122,7 +150,14 @@ describe('Lease', async () => { this.timeout(defaultTimeout); it('non-expired lease', async () => { - const lease = new IntervalLease(k8, renewalService, LeaseHolder.default(), testNamespace, null, leaseDuration); + const lease = new IntervalLease( + k8Factory, + renewalService, + LeaseHolder.default(), + testNamespace, + null, + leaseDuration, + ); expect(await lease.tryAcquire()).to.be.true; expect(await lease.isAcquired()).to.be.true; @@ -134,9 +169,16 @@ describe('Lease', async () => { }); it('non-expired lease held by another user should not be released', async () => { - const lease = new IntervalLease(k8, renewalService, LeaseHolder.default(), testNamespace, null, leaseDuration); + const lease = new IntervalLease( + k8Factory, + renewalService, + LeaseHolder.default(), + testNamespace, + null, + leaseDuration, + ); const newLease = new IntervalLease( - k8, + k8Factory, renewalService, LeaseHolder.of('other'), testNamespace, @@ -158,9 +200,16 @@ describe('Lease', async () => { }); it('expired lease held by another user should be released', async () => { - const lease = new IntervalLease(k8, renewalService, LeaseHolder.default(), testNamespace, null, leaseDuration); + const lease = new IntervalLease( + k8Factory, + renewalService, + LeaseHolder.default(), + testNamespace, + null, + leaseDuration, + ); const newLease = new IntervalLease( - k8, + k8Factory, renewalService, LeaseHolder.of('other'), testNamespace, @@ -182,7 +231,14 @@ describe('Lease', async () => { }); it('expired lease should be released', async () => { - const lease = new IntervalLease(k8, renewalService, LeaseHolder.default(), testNamespace, null, leaseDuration); + const lease = new IntervalLease( + k8Factory, + renewalService, + LeaseHolder.default(), 
+ testNamespace, + null, + leaseDuration, + ); expect(await lease.tryAcquire()).to.be.true; expect(await lease.isAcquired()).to.be.true; diff --git a/test/e2e/integration/core/lease_renewal.test.ts b/test/e2e/integration/core/lease_renewal.test.ts index 7a4b2f00b..38b44daf6 100644 --- a/test/e2e/integration/core/lease_renewal.test.ts +++ b/test/e2e/integration/core/lease_renewal.test.ts @@ -4,7 +4,7 @@ import {it, describe, before, after} from 'mocha'; import {type ConfigManager} from '../../../../src/core/config_manager.js'; import * as logging from '../../../../src/core/logging.js'; -import {type K8} from '../../../../src/core/kube/k8.js'; +import {type K8Factory} from '../../../../src/core/kube/k8_factory.js'; import {expect} from 'chai'; import {IntervalLease} from '../../../../src/core/lease/interval_lease.js'; import {LeaseHolder} from '../../../../src/core/lease/lease_holder.js'; @@ -22,27 +22,34 @@ const leaseDuration = 4; describe('LeaseRenewalService', async () => { const testLogger = logging.NewLogger('debug', true); const configManager: ConfigManager = container.resolve(InjectTokens.ConfigManager); - const k8: K8 = container.resolve(InjectTokens.K8) as K8; + const k8Factory: K8Factory = container.resolve(InjectTokens.K8Factory) as K8Factory; const renewalService: LeaseRenewalService = container.resolve(InjectTokens.LeaseRenewalService); const testNamespace = NamespaceName.of('lease-renewal-e2e'); before(async function () { this.timeout(defaultTimeout); - if (await k8.namespaces().has(testNamespace)) { - await k8.namespaces().delete(testNamespace); + if (await k8Factory.default().namespaces().has(testNamespace)) { + await k8Factory.default().namespaces().delete(testNamespace); await sleep(Duration.ofSeconds(5)); } - await k8.namespaces().create(testNamespace); + await k8Factory.default().namespaces().create(testNamespace); }); after(async function () { this.timeout(defaultTimeout); - await k8.namespaces().delete(testNamespace); + await k8Factory.default().namespaces().delete(testNamespace); }); it('acquired leases should be scheduled', async () => { - const lease = new IntervalLease(k8, renewalService, LeaseHolder.default(), testNamespace, null, leaseDuration); + const lease = new IntervalLease( + k8Factory, + renewalService, + LeaseHolder.default(), + testNamespace, + null, + leaseDuration, + ); await lease.acquire(); expect(lease.scheduleId).to.not.be.null; expect(await renewalService.isScheduled(lease.scheduleId)).to.be.true; @@ -55,7 +62,14 @@ describe('LeaseRenewalService', async () => { it('acquired leases should be renewed', async function () { this.timeout(defaultTimeout); - const lease = new IntervalLease(k8, renewalService, LeaseHolder.default(), testNamespace, null, leaseDuration); + const lease = new IntervalLease( + k8Factory, + renewalService, + LeaseHolder.default(), + testNamespace, + null, + leaseDuration, + ); await lease.acquire(); expect(lease.scheduleId).to.not.be.null; expect(await renewalService.isScheduled(lease.scheduleId)).to.be.true; @@ -88,7 +102,14 @@ describe('LeaseRenewalService', async () => { it('acquired leases with cancelled schedules should not be renewed', async function () { this.timeout(defaultTimeout); - const lease = new IntervalLease(k8, renewalService, LeaseHolder.default(), testNamespace, null, leaseDuration); + const lease = new IntervalLease( + k8Factory, + renewalService, + LeaseHolder.default(), + testNamespace, + null, + leaseDuration, + ); await lease.acquire(); expect(lease.scheduleId).to.not.be.null; expect(await 
renewalService.isScheduled(lease.scheduleId)).to.be.true; @@ -97,7 +118,7 @@ describe('LeaseRenewalService', async () => { expect(await renewalService.isScheduled(lease.scheduleId)).to.be.false; // @ts-ignore - let remoteObject: V1Lease = await lease.retrieveLease(k8); + let remoteObject: V1Lease = await lease.retrieveLease(k8Factory); expect(remoteObject).to.not.be.null; expect(remoteObject?.spec?.renewTime).to.be.undefined; expect(remoteObject?.spec?.acquireTime).to.not.be.undefined; diff --git a/test/e2e/integration/core/platform_installer_e2e.test.ts b/test/e2e/integration/core/platform_installer_e2e.test.ts index 977e7e4cc..5a09ba859 100644 --- a/test/e2e/integration/core/platform_installer_e2e.test.ts +++ b/test/e2e/integration/core/platform_installer_e2e.test.ts @@ -11,7 +11,7 @@ import {e2eTestSuite, getDefaultArgv, getTestCacheDir, TEST_CLUSTER, testLogger} import {Flags as flags} from '../../../../src/commands/flags.js'; import * as version from '../../../../version.js'; import {Duration} from '../../../../src/core/time/duration.js'; -import {type K8} from '../../../../src/core/kube/k8.js'; +import {type K8Factory} from '../../../../src/core/kube/k8_factory.js'; import {type AccountManager} from '../../../../src/core/account_manager.js'; import {type PlatformInstaller} from '../../../../src/core/platform_installer.js'; import {NamespaceName} from '../../../../src/core/kube/resources/namespace/namespace_name.js'; @@ -46,7 +46,7 @@ e2eTestSuite( false, bootstrapResp => { describe('Platform Installer E2E', async () => { - let k8: K8; + let k8Factory: K8Factory; let accountManager: AccountManager; let installer: PlatformInstaller; const podName = PodName.of('network-node1-0'); @@ -54,7 +54,7 @@ e2eTestSuite( const packageVersion = 'v0.42.5'; before(() => { - k8 = bootstrapResp.opts.k8; + k8Factory = bootstrapResp.opts.k8Factory; accountManager = bootstrapResp.opts.accountManager; installer = bootstrapResp.opts.platformInstaller; }); @@ -62,7 +62,7 @@ e2eTestSuite( after(async function () { this.timeout(Duration.ofMinutes(3).toMillis()); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); await accountManager.close(); }); @@ -107,7 +107,8 @@ e2eTestSuite( it('should succeed with valid tag and pod', async () => { expect(await installer.fetchPlatform(podRef, packageVersion)).to.be.true; - const outputs = await k8 + const outputs = await k8Factory + .default() .containers() .readByRef(ContainerRef.of(podRef, constants.ROOT_CONTAINER)) .execContainer(`ls -la ${constants.HEDERA_HAPI_PATH}`); diff --git a/test/e2e/integration/core/remote_config_manager.test.ts b/test/e2e/integration/core/remote_config_manager.test.ts index 8e7e43ee0..205ba29fb 100644 --- a/test/e2e/integration/core/remote_config_manager.test.ts +++ b/test/e2e/integration/core/remote_config_manager.test.ts @@ -15,7 +15,7 @@ import {SoloError} from '../../../../src/core/errors.js'; import {RemoteConfigDataWrapper} from '../../../../src/core/config/remote/remote_config_data_wrapper.js'; import {Duration} from '../../../../src/core/time/duration.js'; import {container} from 'tsyringe-neo'; -import {type K8} from '../../../../src/core/kube/k8.js'; +import {type K8Factory} from '../../../../src/core/kube/k8_factory.js'; import {NamespaceName} from '../../../../src/core/kube/resources/namespace/namespace_name.js'; import {InjectTokens} from '../../../../src/core/dependency_injection/inject_tokens.js'; @@ -48,7 +48,7 @@ e2eTestSuite( false, bootstrapResp => { 
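// Editor's note: pod/container I/O follows the same factory-first chain. A sketch of the two
// operations the surrounding e2e tests perform against the root container — listing a directory
// and copying a file out — assuming podRef, tmpDir and the constants import are in scope as
// they are in those tests:
const containerRef = ContainerRef.of(podRef, constants.ROOT_CONTAINER);
const listing = await k8Factory
  .default()
  .containers()
  .readByRef(containerRef)
  .execContainer(`ls -la ${constants.HEDERA_HAPI_PATH}`);
await k8Factory
  .default()
  .containers()
  .readByRef(containerRef)
  .copyFrom(`${constants.HEDERA_HAPI_PATH}/config.txt`, tmpDir);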
describe('RemoteConfigManager', async () => { - let k8: K8; + let k8Factory: K8Factory; let localConfig: LocalConfig; let remoteConfigManager: RemoteConfigManager; @@ -57,13 +57,13 @@ e2eTestSuite( after(async function () { this.timeout(Duration.ofMinutes(3).toMillis()); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); }); before(function () { this.timeout(defaultTimeout); - k8 = bootstrapResp.opts.k8; + k8Factory = bootstrapResp.opts.k8Factory; localConfig = container.resolve(InjectTokens.LocalConfig); remoteConfigManager = container.resolve(InjectTokens.RemoteConfigManager); diff --git a/test/e2e/integration/core/remote_config_validator.test.ts b/test/e2e/integration/core/remote_config_validator.test.ts index 569551ee5..f9e14fce1 100644 --- a/test/e2e/integration/core/remote_config_validator.test.ts +++ b/test/e2e/integration/core/remote_config_validator.test.ts @@ -26,23 +26,23 @@ import {PodRef} from '../../../../src/core/kube/resources/pod/pod_ref.js'; import {PodName} from '../../../../src/core/kube/resources/pod/pod_name.js'; import {ContainerName} from '../../../../src/core/kube/resources/container/container_name.js'; import {InjectTokens} from '../../../../src/core/dependency_injection/inject_tokens.js'; -import {type K8} from '../../../../src/core/kube/k8.js'; +import {type K8Factory} from '../../../../src/core/kube/k8_factory.js'; describe('RemoteConfigValidator', () => { const namespace = NamespaceName.of('remote-config-validator'); let configManager: ConfigManager; - let k8: K8; + let k8Factory: K8Factory; before(async () => { configManager = container.resolve(InjectTokens.ConfigManager); configManager.update({[flags.namespace.name]: namespace}); - k8 = container.resolve(InjectTokens.K8); - await k8.namespaces().create(namespace); + k8Factory = container.resolve(InjectTokens.K8Factory); + await k8Factory.default().namespaces().create(namespace); }); after(async () => { - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); }); const cluster = 'cluster'; @@ -77,7 +77,8 @@ describe('RemoteConfigValidator', () => { async function createPod(name: string, labels: Record) { try { - await k8 + await k8Factory + .default() .pods() .create( PodRef.of(namespace, PodName.of(name)), @@ -97,7 +98,7 @@ describe('RemoteConfigValidator', () => { it('should fail if component is not present', async () => { try { // @ts-ignore - await Promise.all(RemoteConfigValidator.validateRelays(namespace, components, k8)); + await Promise.all(RemoteConfigValidator.validateRelays(namespace, components, k8Factory)); throw new Error(); } catch (e) { expect(e).to.be.instanceOf(SoloError); @@ -109,7 +110,7 @@ describe('RemoteConfigValidator', () => { await createPod(relayName, {[key]: value}); // @ts-ignore - await Promise.all(RemoteConfigValidator.validateRelays(namespace, components, k8)); + await Promise.all(RemoteConfigValidator.validateRelays(namespace, components, k8Factory)); }); }); @@ -117,7 +118,7 @@ describe('RemoteConfigValidator', () => { it('should fail if component is not present', async () => { try { // @ts-ignore - await Promise.all(RemoteConfigValidator.validateHaProxies(namespace, components, k8)); + await Promise.all(RemoteConfigValidator.validateHaProxies(namespace, components, k8Factory)); throw new Error(); } catch (e) { expect(e).to.be.instanceOf(SoloError); @@ -128,7 +129,7 @@ describe('RemoteConfigValidator', () => { await createPod(haProxyName, {app: haProxyName}); // @ts-ignore - 
await Promise.all(RemoteConfigValidator.validateHaProxies(namespace, components, k8)); + await Promise.all(RemoteConfigValidator.validateHaProxies(namespace, components, k8Factory)); }); }); @@ -136,7 +137,7 @@ describe('RemoteConfigValidator', () => { it('should fail if component is not present', async () => { try { // @ts-ignore - await Promise.all(RemoteConfigValidator.validateMirrorNodes(namespace, components, k8)); + await Promise.all(RemoteConfigValidator.validateMirrorNodes(namespace, components, k8Factory)); throw new Error(); } catch (e) { expect(e).to.be.instanceOf(SoloError); @@ -149,7 +150,7 @@ describe('RemoteConfigValidator', () => { await createPod(mirrorNodeName, {[key1]: value1, [key2]: value2}); // @ts-ignore - await Promise.all(RemoteConfigValidator.validateMirrorNodes(namespace, components, k8)); + await Promise.all(RemoteConfigValidator.validateMirrorNodes(namespace, components, k8Factory)); }); }); @@ -157,7 +158,7 @@ describe('RemoteConfigValidator', () => { it('should fail if component is not present', async () => { try { // @ts-ignore - await Promise.all(RemoteConfigValidator.validateEnvoyProxies(namespace, components, k8)); + await Promise.all(RemoteConfigValidator.validateEnvoyProxies(namespace, components, k8Factory)); throw new Error(); } catch (e) { expect(e).to.be.instanceOf(SoloError); @@ -168,7 +169,7 @@ describe('RemoteConfigValidator', () => { await createPod(envoyProxyName, {app: envoyProxyName}); // @ts-ignore - await Promise.all(RemoteConfigValidator.validateEnvoyProxies(namespace, components, k8)); + await Promise.all(RemoteConfigValidator.validateEnvoyProxies(namespace, components, k8Factory)); }); }); @@ -176,7 +177,7 @@ describe('RemoteConfigValidator', () => { it('should fail if component is not present', async () => { try { // @ts-ignore - await Promise.all(RemoteConfigValidator.validateConsensusNodes(namespace, components, k8)); + await Promise.all(RemoteConfigValidator.validateConsensusNodes(namespace, components, k8Factory)); throw new Error(); } catch (e) { expect(e).to.be.instanceOf(SoloError); @@ -187,7 +188,7 @@ describe('RemoteConfigValidator', () => { await createPod(nodeAlias, {app: `network-${nodeAlias}`}); // @ts-ignore - await Promise.all(RemoteConfigValidator.validateConsensusNodes(namespace, components, k8)); + await Promise.all(RemoteConfigValidator.validateConsensusNodes(namespace, components, k8Factory)); }); }); @@ -195,7 +196,7 @@ describe('RemoteConfigValidator', () => { it('should fail if component is not present', async () => { try { // @ts-ignore - await Promise.all(RemoteConfigValidator.validateMirrorNodeExplorers(namespace, components, k8)); + await Promise.all(RemoteConfigValidator.validateMirrorNodeExplorers(namespace, components, k8Factory)); throw new Error(); } catch (e) { expect(e).to.be.instanceOf(SoloError); @@ -207,7 +208,7 @@ describe('RemoteConfigValidator', () => { await createPod(mirrorNodeExplorerName, {[key]: value}); // @ts-ignore - await Promise.all(RemoteConfigValidator.validateMirrorNodeExplorers(namespace, components, k8)); + await Promise.all(RemoteConfigValidator.validateMirrorNodeExplorers(namespace, components, k8Factory)); }); }); }); diff --git a/test/test_add.ts b/test/test_add.ts index b8f159cd1..2ab02cff0 100644 --- a/test/test_add.ts +++ b/test/test_add.ts @@ -63,7 +63,7 @@ export function testNodeAdd( const nodeCmd = bootstrapResp.cmd.nodeCmd; const accountCmd = bootstrapResp.cmd.accountCmd; const networkCmd = bootstrapResp.cmd.networkCmd; - const k8 = bootstrapResp.opts.k8; + const 
k8Factory = bootstrapResp.opts.k8Factory; let existingServiceMap: Map; let existingNodeIdsPrivateKeysHash: Map>; @@ -74,12 +74,16 @@ export function testNodeAdd( await bootstrapResp.opts.accountManager.close(); await nodeCmd.handlers.stop(argv); await networkCmd.destroy(argv); - await k8.namespaces().delete(namespace); + await k8Factory.default().namespaces().delete(namespace); }); it('cache current version of private keys', async () => { existingServiceMap = await bootstrapResp.opts.accountManager.getNodeServiceMap(namespace); - existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(existingServiceMap, k8, getTmpDir()); + existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash( + existingServiceMap, + k8Factory, + getTmpDir(), + ); }).timeout(defaultTimeout); it('should succeed with init command', async () => { @@ -104,7 +108,7 @@ export function testNodeAdd( it('existing nodes private keys should not have changed', async () => { const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash( existingServiceMap, - k8, + k8Factory, getTmpDir(), ); diff --git a/test/test_util.ts b/test/test_util.ts index e9fc73bf8..74c989e5a 100644 --- a/test/test_util.ts +++ b/test/test_util.ts @@ -26,7 +26,7 @@ import {type SoloLogger} from '../src/core/logging.js'; import {type BaseCommand} from '../src/commands/base.js'; import {type NodeAlias} from '../src/types/aliases.js'; import {type NetworkNodeServices} from '../src/core/network_node_services.js'; -import {type K8} from '../src/core/kube/k8.js'; +import {type K8Factory} from '../src/core/kube/k8_factory.js'; import {type AccountManager} from '../src/core/account_manager.js'; import {type PlatformInstaller} from '../src/core/platform_installer.js'; import {type ProfileManager} from '../src/core/profile_manager.js'; @@ -90,7 +90,7 @@ export function getDefaultArgv() { interface TestOpts { logger: SoloLogger; helm: Helm; - k8: K8; + k8Factory: K8Factory; chartManager: ChartManager; configManager: ConfigManager; downloader: PackageDownloader; @@ -126,7 +126,7 @@ interface BootstrapResponse { export function bootstrapTestVariables( testName: string, argv: any, - k8Arg: K8 | null = null, + k8FactoryArg: K8Factory | null = null, initCmdArg: InitCommand | null = null, clusterCmdArg: ClusterCommand | null = null, networkCmdArg: NetworkCommand | null = null, @@ -145,7 +145,7 @@ export function bootstrapTestVariables( const helm: Helm = container.resolve(InjectTokens.Helm); const chartManager: ChartManager = container.resolve(InjectTokens.ChartManager); const keyManager: KeyManager = container.resolve(InjectTokens.KeyManager); - const k8: K8 = k8Arg || container.resolve(InjectTokens.K8); + const k8Factory: K8Factory = k8FactoryArg || container.resolve(InjectTokens.K8Factory); const accountManager: AccountManager = container.resolve(InjectTokens.AccountManager); const platformInstaller: PlatformInstaller = container.resolve(InjectTokens.PlatformInstaller); const profileManager: ProfileManager = container.resolve(InjectTokens.ProfileManager); @@ -158,7 +158,7 @@ export function bootstrapTestVariables( const opts: TestOpts = { logger: testLogger, helm, - k8, + k8Factory, chartManager, configManager, downloader, @@ -200,7 +200,7 @@ export function bootstrapTestVariables( export function e2eTestSuite( testName: string, argv: Record, - k8Arg: K8 | null = null, + k8FactoryArg: K8Factory | null = null, initCmdArg: InitCommand | null = null, clusterCmdArg: ClusterCommand | null = null, networkCmdArg: NetworkCommand | null = null, 
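// Editor's note: shared test helpers now accept the factory and defer the .default() call to
// the point of use. listNetworkPods below is a hypothetical helper, shown only to illustrate
// the K8 -> K8Factory parameter swap that getNodeAliasesPrivateKeysHash and addKeyHashToMap
// undergo in this file; the pods().list(namespace, labels) call is the one used throughout
// these tests.
async function listNetworkPods(k8Factory: K8Factory, namespace: NamespaceName) {
  return k8Factory.default().pods().list(namespace, ['solo.hedera.com/type=network-node']);
}

// callers hand over the factory they obtained from the bootstrap opts or the DI container:
const pods = await listNetworkPods(bootstrapResp.opts.k8Factory, namespace);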
@@ -212,7 +212,7 @@ export function e2eTestSuite( const bootstrapResp = bootstrapTestVariables( testName, argv, - k8Arg, + k8FactoryArg, initCmdArg, clusterCmdArg, networkCmdArg, @@ -221,7 +221,7 @@ export function e2eTestSuite( ); const namespace = bootstrapResp.namespace; const initCmd = bootstrapResp.cmd.initCmd; - const k8 = bootstrapResp.opts.k8; + const k8Factory = bootstrapResp.opts.k8Factory; const clusterCmd = bootstrapResp.cmd.clusterCmd; const networkCmd = bootstrapResp.cmd.networkCmd; const nodeCmd = bootstrapResp.cmd.nodeCmd; @@ -248,10 +248,10 @@ export function e2eTestSuite( it('should cleanup previous deployment', async () => { await initCmd.init(argv); - if (await k8.namespaces().has(namespace)) { - await k8.namespaces().delete(namespace); + if (await k8Factory.default().namespaces().has(namespace)) { + await k8Factory.default().namespaces().delete(namespace); - while (await k8.namespaces().has(namespace)) { + while (await k8Factory.default().namespaces().has(namespace)) { testLogger.debug(`Namespace ${namespace} still exist. Waiting...`); await sleep(Duration.ofSeconds(2)); } @@ -401,7 +401,7 @@ export function accountCreationShouldSucceed( export async function getNodeAliasesPrivateKeysHash( networkNodeServicesMap: Map, - k8: K8, + k8Factory: K8Factory, destDir: string, ) { const dataKeysDir = path.join(constants.HEDERA_HAPI_PATH, 'data', 'keys'); @@ -416,7 +416,7 @@ export async function getNodeAliasesPrivateKeysHash( } await addKeyHashToMap( networkNodeServices.namespace, - k8, + k8Factory, nodeAlias, dataKeysDir, uniqueNodeDestDir, @@ -425,7 +425,7 @@ export async function getNodeAliasesPrivateKeysHash( ); await addKeyHashToMap( networkNodeServices.namespace, - k8, + k8Factory, nodeAlias, tlsKeysDir, uniqueNodeDestDir, @@ -439,14 +439,15 @@ export async function getNodeAliasesPrivateKeysHash( async function addKeyHashToMap( namespace: NamespaceName, - k8: K8, + k8Factory: K8Factory, nodeAlias: NodeAlias, keyDir: string, uniqueNodeDestDir: string, keyHashMap: Map, privateKeyFileName: string, ) { - await k8 + await k8Factory + .default() .containers() .readByRef(ContainerRef.of(PodRef.of(namespace, Templates.renderNetworkPodName(nodeAlias)), ROOT_CONTAINER)) .copyFrom(path.join(keyDir, privateKeyFileName), uniqueNodeDestDir); diff --git a/test/unit/commands/base.test.ts b/test/unit/commands/base.test.ts index da923dee8..3fcef6a87 100644 --- a/test/unit/commands/base.test.ts +++ b/test/unit/commands/base.test.ts @@ -43,13 +43,13 @@ describe('BaseCommand', () => { sandbox = sinon.createSandbox(); sandbox.stub(K8Client.prototype, 'init').callsFake(() => this); - const k8 = container.resolve(InjectTokens.K8); + const k8Factory = container.resolve(InjectTokens.K8Factory); // @ts-ignore baseCmd = new BaseCommand({ logger: testLogger, helm, - k8, + k8Factory, chartManager, configManager, depManager, diff --git a/test/unit/commands/cluster.test.ts b/test/unit/commands/cluster.test.ts index aea41493d..c6c96a19f 100644 --- a/test/unit/commands/cluster.test.ts +++ b/test/unit/commands/cluster.test.ts @@ -29,6 +29,7 @@ import {type BaseCommand} from '../../../src/commands/base.js'; import {LocalConfig} from '../../../src/core/config/local_config.js'; import {type CommandFlag} from '../../../src/types/flag_types.js'; import {K8Client} from '../../../src/core/kube/k8_client/k8_client.js'; +import {K8ClientFactory} from '../../../src/core/kube/k8_client/k8_client_factory.js'; import {type Cluster, KubeConfig} from '@kubernetes/client-node'; import {RemoteConfigManager} from 
'../../../src/core/config/remote/remote_config_manager.js'; import {DependencyManager} from '../../../src/core/dependency_managers/index.js'; @@ -53,7 +54,7 @@ import {InjectTokens} from '../../../src/core/dependency_injection/inject_tokens const getBaseCommandOpts = () => ({ logger: sinon.stub(), helm: sinon.stub(), - k8: { + k8Factory: { isMinioInstalled: sinon.stub().returns(false), isPrometheusInstalled: sinon.stub().returns(false), isCertManagerInstalled: sinon.stub().returns(false), @@ -136,7 +137,7 @@ describe('ClusterCommand unit tests', () => { let tasks: ClusterCommandTasks; let command: BaseCommand; let loggerStub: sinon.SinonStubbedInstance; - let k8Stub: sinon.SinonStubbedInstance; + let k8FactoryStub: sinon.SinonStubbedInstance; let remoteConfigManagerStub: sinon.SinonStubbedInstance; let localConfig: LocalConfig; const defaultRemoteConfig = { @@ -157,19 +158,21 @@ describe('ClusterCommand unit tests', () => { }, ) => { const loggerStub = sandbox.createStubInstance(SoloLogger); - k8Stub = sandbox.createStubInstance(K8Client); - const k8StubContexts = sandbox.createStubInstance(K8ClientContexts); - k8StubContexts.list.returns(['context-1', 'context-2', 'context-3']); - k8Stub.contexts.returns(k8StubContexts); + k8FactoryStub = sandbox.createStubInstance(K8ClientFactory); + const k8Stub = sandbox.createStubInstance(K8Client); + k8FactoryStub.default.returns(k8Stub); + const k8ContextsStub = sandbox.createStubInstance(K8ClientContexts); + k8ContextsStub.list.returns(['context-1', 'context-2', 'context-3']); + k8Stub.contexts.returns(k8ContextsStub); const clusterChecksStub = sandbox.createStubInstance(ClusterChecks); clusterChecksStub.isMinioInstalled.returns(new Promise(() => true)); clusterChecksStub.isPrometheusInstalled.returns(new Promise(() => true)); clusterChecksStub.isCertManagerInstalled.returns(new Promise(() => true)); if (opts.testContextConnectionError) { - k8StubContexts.testContextConnection.resolves(false); + k8ContextsStub.testContextConnection.resolves(false); } else { - k8StubContexts.testContextConnection.resolves(true); + k8ContextsStub.testContextConnection.resolves(true); } const kubeConfigClusterObject = { @@ -194,7 +197,7 @@ describe('ClusterCommand unit tests', () => { const k8ClustersStub = sandbox.createStubInstance(K8ClientClusters); k8ClustersStub.readCurrent.returns(kubeConfigClusterObject.name); k8Stub.clusters.returns(k8ClustersStub); - k8StubContexts.readCurrent.returns('context-from-kubeConfig'); + k8ContextsStub.readCurrent.returns('context-from-kubeConfig'); const configManager = sandbox.createStubInstance(ConfigManager); @@ -205,7 +208,7 @@ describe('ClusterCommand unit tests', () => { return { logger: loggerStub, helm: sandbox.createStubInstance(Helm), - k8: k8Stub, + k8Factory: k8FactoryStub, chartManager: sandbox.createStubInstance(ChartManager), configManager, depManager: sandbox.createStubInstance(DependencyManager), @@ -225,7 +228,7 @@ describe('ClusterCommand unit tests', () => { async function runUpdateLocalConfigTask(opts) { command = new ClusterCommand(opts); - tasks = new ClusterCommandTasks(command, opts.k8); + tasks = new ClusterCommandTasks(command, opts.k8Factory); // @ts-expect-error - TS2554: Expected 0 arguments, but got 1. 
     const taskObj = tasks.updateLocalConfig({});
@@ -363,7 +366,7 @@ describe('ClusterCommand unit tests', () => {

   async function runSelectContextTask(opts) {
     command = new ClusterCommand(opts);
-    tasks = new ClusterCommandTasks(command, opts.k8);
+    tasks = new ClusterCommandTasks(command, opts.k8Factory);

     // @ts-expect-error - TS2554: Expected 0 arguments, but got 1
     const taskObj = tasks.selectContext({});
@@ -403,21 +406,21 @@ describe('ClusterCommand unit tests', () => {
       ]);
       command = await runSelectContextTask(opts);
       // @ts-ignore
-      expect(command.getK8().contexts().updateCurrent).to.have.been.calledWith('provided-context-1');
+      expect(command.getK8Factory().default().contexts().updateCurrent).to.have.been.calledWith('provided-context-1');
     });

     it('should use local config mapping to connect to first provided cluster', async () => {
       const opts = getBaseCommandOpts(sandbox, {}, [[flags.clusterName, 'cluster-2,cluster-3']]);
       command = await runSelectContextTask(opts);
       // @ts-ignore
-      expect(command.getK8().contexts().updateCurrent).to.have.been.calledWith('context-2');
+      expect(command.getK8Factory().default().contexts().updateCurrent).to.have.been.calledWith('context-2');
     });

     it('should prompt for context if selected cluster is not found in local config mapping', async () => {
       const opts = getBaseCommandOpts(sandbox, {}, [[flags.clusterName, 'cluster-3']]);
       command = await runSelectContextTask(opts);
       // @ts-ignore
-      expect(command.getK8().contexts().updateCurrent).to.have.been.calledWith('context-3');
+      expect(command.getK8Factory().default().contexts().updateCurrent).to.have.been.calledWith('context-3');
     });

     it('should use default kubeConfig context if selected cluster is not found in local config mapping and quiet=true', async () => {
@@ -427,21 +430,23 @@ describe('ClusterCommand unit tests', () => {
       ]);
       command = await runSelectContextTask(opts);
       // @ts-ignore
-      expect(command.getK8().contexts().updateCurrent).to.have.been.calledWith('context-from-kubeConfig');
+      expect(command.getK8Factory().default().contexts().updateCurrent).to.have.been.calledWith(
+        'context-from-kubeConfig',
+      );
     });

     it('should use context from local config mapping for the first cluster from the selected deployment', async () => {
       const opts = getBaseCommandOpts(sandbox, {}, [[flags.deployment, 'deployment-2']]);
       command = await runSelectContextTask(opts);
       // @ts-ignore
-      expect(command.getK8().contexts().updateCurrent).to.have.been.calledWith('context-2');
+      expect(command.getK8Factory().default().contexts().updateCurrent).to.have.been.calledWith('context-2');
     });

     it('should prompt for context if selected deployment is found in local config but the context is not', async () => {
       const opts = getBaseCommandOpts(sandbox, {}, [[flags.deployment, 'deployment-3']]);
       command = await runSelectContextTask(opts);
       // @ts-ignore
-      expect(command.getK8().contexts().updateCurrent).to.have.been.calledWith('context-3');
+      expect(command.getK8Factory().default().contexts().updateCurrent).to.have.been.calledWith('context-3');
     });

     it('should use default context if selected deployment is found in local config but the context is not and quiet=true', async () => {
@@ -451,14 +456,16 @@ describe('ClusterCommand unit tests', () => {
       ]);
       command = await runSelectContextTask(opts);
       // @ts-ignore
-      expect(command.getK8().contexts().updateCurrent).to.have.been.calledWith('context-from-kubeConfig');
+      expect(command.getK8Factory().default().contexts().updateCurrent).to.have.been.calledWith(
+        'context-from-kubeConfig',
+      );
     });

     it('should prompt for clusters and contexts if selected deployment is not found in local config', async () => {
       const opts = getBaseCommandOpts(sandbox, {}, [[flags.deployment, 'deployment-4']]);
       command = await runSelectContextTask(opts);

-      expect(command.getK8().contexts().updateCurrent).to.have.been.calledWith('context-3');
+      expect(command.getK8Factory().default().contexts().updateCurrent).to.have.been.calledWith('context-3');
     });

     it('should use clusters and contexts from kubeConfig if selected deployment is not found in local config and quiet=true', async () => {
@@ -468,7 +475,9 @@ describe('ClusterCommand unit tests', () => {
       ]);
       command = await runSelectContextTask(opts);

-      expect(command.getK8().contexts().updateCurrent).to.have.been.calledWith('context-from-kubeConfig');
+      expect(command.getK8Factory().default().contexts().updateCurrent).to.have.been.calledWith(
+        'context-from-kubeConfig',
+      );
     });

     it('throws error when context is invalid', async () => {
@@ -490,7 +499,7 @@ describe('ClusterCommand unit tests', () => {

   async function runReadClustersFromRemoteConfigTask(opts) {
     command = new ClusterCommand(opts);
-    tasks = new ClusterCommandTasks(command, k8Stub);
+    tasks = new ClusterCommandTasks(command, k8FactoryStub);
     const taskObj = tasks.readClustersFromRemoteConfig({});
     taskStub = sandbox.stub() as unknown as ListrTaskWrapper;
     taskStub.newListr = sandbox.stub();
@@ -552,9 +561,9 @@ describe('ClusterCommand unit tests', () => {
       expect(subTasks.length).to.eq(2);
       await runSubTasks(subTasks);
       expect(contextPromptStub).not.called;
-      expect(command.getK8().contexts().updateCurrent).to.have.been.calledWith('context-2');
-      expect(command.getK8().contexts().testContextConnection).calledOnce;
-      expect(command.getK8().contexts().testContextConnection).calledWith('context-2');
+      expect(command.getK8Factory().default().contexts().updateCurrent).to.have.been.calledWith('context-2');
+      expect(command.getK8Factory().default().contexts().testContextConnection).calledOnce;
+      expect(command.getK8Factory().default().contexts().testContextConnection).calledWith('context-2');
     });

     it('should prompt for context when reading unknown cluster', async () => {
@@ -571,9 +580,9 @@ describe('ClusterCommand unit tests', () => {
       expect(subTasks.length).to.eq(2);
       await runSubTasks(subTasks);
       expect(contextPromptStub).calledOnce;
-      expect(command.getK8().contexts().updateCurrent).to.have.been.calledWith('prompted-context');
-      expect(command.getK8().contexts().testContextConnection).calledOnce;
-      expect(command.getK8().contexts().testContextConnection).calledWith('prompted-context');
+      expect(command.getK8Factory().default().contexts().updateCurrent).to.have.been.calledWith('prompted-context');
+      expect(command.getK8Factory().default().contexts().testContextConnection).calledOnce;
+      expect(command.getK8Factory().default().contexts().testContextConnection).calledWith('prompted-context');
     });

     it('should throw error for invalid prompted context', async () => {
@@ -594,8 +603,8 @@ describe('ClusterCommand unit tests', () => {
       } catch (e) {
         expect(e.message).to.eq(ErrorMessages.INVALID_CONTEXT_FOR_CLUSTER_DETAILED('prompted-context', 'cluster-4'));
         expect(contextPromptStub).calledOnce;
-        expect(command.getK8().contexts().testContextConnection).calledOnce;
-        expect(command.getK8().contexts().testContextConnection).calledWith('prompted-context');
+        expect(command.getK8Factory().default().contexts().testContextConnection).calledOnce;
+        expect(command.getK8Factory().default().contexts().testContextConnection).calledWith('prompted-context');
       }
     });

@@ -623,8 +632,8 @@ describe('ClusterCommand unit tests', () => {
       } catch (e) {
         expect(e.message).to.eq(ErrorMessages.REMOTE_CONFIGS_DO_NOT_MATCH('cluster-3', 'cluster-4'));
         expect(contextPromptStub).calledOnce;
-        expect(command.getK8().contexts().testContextConnection).calledOnce;
-        expect(command.getK8().contexts().testContextConnection).calledWith('prompted-context');
+        expect(command.getK8Factory().default().contexts().testContextConnection).calledOnce;
+        expect(command.getK8Factory().default().contexts().testContextConnection).calledWith('prompted-context');
       }
     });
   });
diff --git a/test/unit/commands/network.test.ts b/test/unit/commands/network.test.ts
index 57899d386..e0b83c92b 100644
--- a/test/unit/commands/network.test.ts
+++ b/test/unit/commands/network.test.ts
@@ -23,7 +23,7 @@ import {ListrLease} from '../../../src/core/lease/listr_lease.js';
 import {GenesisNetworkDataConstructor} from '../../../src/core/genesis_network_models/genesis_network_data_constructor.js';
 import {container} from 'tsyringe-neo';
 import {type SoloLogger} from '../../../src/core/logging.js';
-import {type K8} from '../../../src/core/kube/k8.js';
+import {type K8Factory} from '../../../src/core/kube/k8_factory.js';
 import {type DependencyManager} from '../../../src/core/dependency_managers/index.js';
 import {type LocalConfig} from '../../../src/core/config/local_config.js';
 import {resetForTest} from '../../test_container.js';
@@ -58,17 +58,19 @@ describe('NetworkCommand unit tests', () => {
     opts.configManager = container.resolve(InjectTokens.ConfigManager);
     opts.configManager.update(argv);

-    opts.k8 = sinon.stub() as unknown as K8;
-    opts.k8.namespaces = sinon.stub().returns({
+    opts.k8Factory = sinon.stub() as unknown as K8Factory;
+    const k8Stub = sinon.stub();
+    opts.k8Factory.default = sinon.stub().returns(k8Stub);
+    opts.k8Factory.default().namespaces = sinon.stub().returns({
       has: sinon.stub().returns(true),
     });
-    opts.k8.configMaps = sinon.stub() as unknown as K8ClientConfigMaps;
-    opts.k8.configMaps.read = sinon.stub();
-    opts.k8.pods = sinon.stub().returns({
+    opts.k8Factory.default().configMaps = sinon.stub() as unknown as K8ClientConfigMaps;
+    opts.k8Factory.default().configMaps.read = sinon.stub();
+    opts.k8Factory.default().pods = sinon.stub().returns({
       waitForRunningPhase: sinon.stub(),
       waitForReadyStatus: sinon.stub(),
     });
-    opts.k8.leases = sinon.stub().returns({
+    opts.k8Factory.default().leases = sinon.stub().returns({
       read: sinon.stub(),
     });
     const clusterChecksStub = sinon.stub() as unknown as ClusterChecks;
@@ -77,8 +79,8 @@ describe('NetworkCommand unit tests', () => {
     clusterChecksStub.isMinioInstalled = sinon.stub();
     clusterChecksStub.isPrometheusInstalled = sinon.stub();
     clusterChecksStub.isCertManagerInstalled = sinon.stub();
     container.registerInstance(InjectTokens.ClusterChecks, clusterChecksStub);
-    opts.k8.logger = opts.logger;
-    container.registerInstance(InjectTokens.K8, opts.k8);
+    opts.k8Factory.default().logger = opts.logger;
+    container.registerInstance(InjectTokens.K8Factory, opts.k8Factory);
     opts.depManager = sinon.stub() as unknown as DependencyManager;
     container.registerInstance(InjectTokens.DependencyManager, opts.depManager);
diff --git a/test/unit/commands/node.test.ts b/test/unit/commands/node.test.ts
index e9f32b3b2..3fc30829e 100644
--- a/test/unit/commands/node.test.ts
+++ b/test/unit/commands/node.test.ts
@@ -10,7 +10,7 @@ import {NodeCommand} from '../../../src/commands/node/index.js';
 const getBaseCommandOpts = () => ({
   logger: sinon.stub(),
   helm: sinon.stub(),
-  k8: sinon.stub(),
+  k8Factory: sinon.stub(),
   chartManager: sinon.stub(),
   configManager: sinon.stub(),
   depManager: sinon.stub(),