feat: add support for using load balancer address in gossip configuration #1405

Merged (6 commits) on Feb 16, 2025
83 changes: 82 additions & 1 deletion src/commands/network.ts
@@ -33,6 +33,7 @@ import {type ConsensusNode} from '../core/model/consensus_node.js';
import {type ClusterRef, type ClusterRefs} from '../core/config/remote/types.js';
import {Base64} from 'js-base64';
import {SecretType} from '../core/kube/resources/secret/secret_type.js';
import {Duration} from '../core/time/duration.js';

export interface NetworkDeployConfigClass {
applicationEnv: string;
@@ -492,7 +493,8 @@ export class NetworkCommand extends BaseCommand {
clusterRef =>
(valuesArgs[clusterRef] +=
' --set "defaults.haproxy.service.type=LoadBalancer"' +
' --set "defaults.envoyProxy.service.type=LoadBalancer"'),
' --set "defaults.envoyProxy.service.type=LoadBalancer"' +
' --set "defaults.consensus.service.type=LoadBalancer"'),
);
}
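Switching the consensus service to type LoadBalancer alongside haproxy and envoy gives each network-node service its own externally reachable address, which is what the gossip endpoints are later rewritten to. A minimal sketch of verifying the assignment with the k8 wrapper used elsewhere in this PR (`namespace` and `context` are stand-ins, not values from the diff):

// Sketch: list the network-node services and check that every one of them
// has been assigned a load balancer ingress (hostname or IP).
const services = await k8Factory
  .getK8(context)
  .services()
  .list(namespace, ['solo.hedera.com/type=network-node-svc']);
const allAssigned = services.every(
  svc => (svc.status?.loadBalancer?.ingress?.length ?? 0) > 0,
);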

@@ -825,6 +827,85 @@
}
},
},
{
title: 'Check for load balancer',
skip: ctx => ctx.config.loadBalancerEnabled === false,
task: (ctx, task) => {
const subTasks: any[] = [];
const config = ctx.config;

//Add check for network node service to be created and load balancer to be assigned (if load balancer is enabled)
for (const consensusNode of config.consensusNodes) {
subTasks.push({
title: `Load balancer is assigned for: ${chalk.yellow(consensusNode.name)}, cluster: ${chalk.yellow(consensusNode.cluster)}`,
task: async () => {
let attempts = 0;
let svc = null;

while (attempts < 30) {
svc = await self.k8Factory
.getK8(consensusNode.context)
.services()
.list(config.namespace, [
`solo.hedera.com/node-id=${consensusNode.nodeId},solo.hedera.com/type=network-node-svc`,
]);

if (svc && svc.length > 0 && svc[0].status.loadBalancer.ingress.length > 0) {
return;
}

attempts++;
await helpers.sleep(Duration.ofSeconds(2));
}
},
});
}

// set up the sub-tasks
return task.newListr(subTasks, {
concurrent: true,
rendererOptions: {
collapseSubtasks: false,
},
});
},
},
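The loop above gives each node roughly a 60-second budget (30 attempts, 2 seconds apart), with all nodes polled concurrently; if no ingress appears it returns quietly and a later step surfaces the failure. A stricter variant that fails fast might look like this (hypothetical helper, not part of the PR):

// Sketch: same label selector and cadence as the task above, but throws on timeout.
async function waitForLoadBalancer(k8: K8, namespace: NamespaceName, nodeId: number): Promise<void> {
  for (let attempt = 0; attempt < 30; attempt++) {
    const services = await k8
      .services()
      .list(namespace, [`solo.hedera.com/node-id=${nodeId},solo.hedera.com/type=network-node-svc`]);
    if ((services?.[0]?.status?.loadBalancer?.ingress?.length ?? 0) > 0) return;
    await helpers.sleep(Duration.ofSeconds(2));
  }
  throw new SoloError(`no load balancer ingress for node-id=${nodeId} after 30 attempts`);
}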
{
title: 'Redeploy chart with external IP address config',
skip: ctx => ctx.config.loadBalancerEnabled === false,
task: async (ctx, task) => {
// Update the valuesArgMap with the external IP addresses
// This regenerates the config.txt and genesis-network.json files with the external IP addresses
ctx.config.valuesArgMap = await this.prepareValuesArgMap(ctx.config);

// Perform a helm upgrade for each cluster
const subTasks: any[] = [];
const config = ctx.config;
for (const clusterRef of Object.keys(config.clusterRefs)) {
subTasks.push({
title: `Upgrade chart for cluster: ${chalk.yellow(clusterRef)}`,
task: async () => {
await this.chartManager.upgrade(
config.namespace,
constants.SOLO_DEPLOYMENT_CHART,
ctx.config.chartPath,
config.soloChartVersion,
config.valuesArgMap[clusterRef],
config.clusterRefs[clusterRef],
);
},
});
}

// set up the sub-tasks
return task.newListr(subTasks, {
concurrent: true,
rendererOptions: {
collapseSubtasks: false,
},
});
},
},
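Together with the previous task this forms a two-pass rollout: the initial install creates the LoadBalancer services, the check task waits for their external addresses, and this task rebuilds the values map (regenerating config.txt and genesis-network.json) before upgrading each cluster in place. Condensed, the flow is roughly as follows (wrapper and helper names are hypothetical, not actual Solo API):

// Sketch of the end-to-end flow added by this PR.
async function deployWithExternalGossipAddresses(ctx: any): Promise<void> {
  await installSoloChart(ctx); // pass 1: services created with type=LoadBalancer
  await waitForAllLoadBalancers(ctx); // poll until every node service has an ingress
  ctx.config.valuesArgMap = await prepareValuesArgMap(ctx.config); // re-resolve gossip endpoints
  await upgradeSoloChart(ctx); // pass 2: helm upgrade with external addresses baked in
}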
{
title: 'Check node pods are running',
task: (ctx, task) => {
1 change: 0 additions & 1 deletion src/core/dependency_injection/container_init.ts
@@ -10,7 +10,6 @@ import * as constants from '../constants.js';
import {Helm} from '../helm.js';
import {ChartManager} from '../chart_manager.js';
import {ConfigManager} from '../config_manager.js';
-import {K8Client} from '../kube/k8_client/k8_client.js';
import {AccountManager} from '../account_manager.js';
import {PlatformInstaller} from '../platform_installer.js';
import {KeyManager} from '../key_manager.js';
44 changes: 42 additions & 2 deletions src/core/helpers.ts
@@ -8,7 +8,6 @@ import util from 'util';
import * as semver from 'semver';
import {SoloError} from './errors.js';
import {Templates} from './templates.js';
-import {ROOT_DIR} from './constants.js';
import * as constants from './constants.js';
import {PrivateKey, ServiceEndpoint} from '@hashgraph/sdk';
import {type NodeAlias, type NodeAliases} from '../types/aliases.js';
@@ -20,7 +19,8 @@ import {type ConsensusNode} from './model/consensus_node.js';
import {type Optional} from '../types/index.js';
import {type Version} from './config/remote/types.js';
import {fileURLToPath} from 'url';
-import {type NamespaceName} from './kube/resources/namespace/namespace_name.js';
+import {NamespaceName} from './kube/resources/namespace/namespace_name.js';
+import {type K8} from './kube/k8.js';

export function getInternalIp(releaseVersion: semver.SemVer, namespaceName: NamespaceName, nodeAlias: NodeAlias) {
//? Explanation: for v0.59.x the internal IP address is set to 127.0.0.1 to avoid an ISS
@@ -51,6 +51,46 @@ export function getInternalIp(releaseVersion: semver.SemVer, namespaceName: NamespaceName, nodeAlias: NodeAlias) {
return internalIp;
}

export async function getExternalAddress(
consensusNode: ConsensusNode,
k8: K8,
useLoadBalancer: boolean,
): Promise<string> {
if (useLoadBalancer) {
return resolveLoadBalancerAddress(consensusNode, k8);
}

return consensusNode.fullyQualifiedDomainName;
}

async function resolveLoadBalancerAddress(consensusNode: ConsensusNode, k8: K8): Promise<string> {
const ns = NamespaceName.of(consensusNode.namespace);
const serviceList = await k8
.services()
.list(ns, [`solo.hedera.com/node-id=${consensusNode.nodeId},solo.hedera.com/type=network-node-svc`]);

if (serviceList && serviceList.length > 0) {
const svc = serviceList[0];

if (!svc.metadata.name.startsWith('network-node')) {
throw new SoloError(`Service found is not a network node service: ${svc.metadata.name}`);
}

if (svc.status?.loadBalancer?.ingress && svc.status.loadBalancer.ingress.length > 0) {
for (let i = 0; i < svc.status.loadBalancer.ingress.length; i++) {
const ingress = svc.status.loadBalancer.ingress[i];
if (ingress.hostname) {
return ingress.hostname;
} else if (ingress.ip) {
return ingress.ip;
}
}
}
}

return consensusNode.fullyQualifiedDomainName;
}
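resolveLoadBalancerAddress prefers an ingress hostname over its IP and falls back to the cluster-internal fully qualified domain name when no ingress has been assigned, so callers always get a usable gossip address. Usage, as it appears in prepareConfigTxt later in this diff:

const externalIP = await helpers.getExternalAddress(
  consensusNode,
  this.k8Factory.getK8(consensusNode.context),
  loadBalancerEnabled,
);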

export function sleep(duration: Duration) {
return new Promise<void>(resolve => {
setTimeout(resolve, duration.toMillis());
22 changes: 11 additions & 11 deletions src/core/kube/k8_client/k8_client.ts
@@ -5,27 +5,27 @@ import * as k8s from '@kubernetes/client-node';
import {SoloError} from '../../errors.js';
import {type K8} from '../k8.js';
import {type Namespaces} from '../resources/namespace/namespaces.js';
-import {K8ClientClusters} from '../k8_client/resources/cluster/k8_client_clusters.js';
+import {K8ClientClusters} from './resources/cluster/k8_client_clusters.js';
import {type Clusters} from '../resources/cluster/clusters.js';
import {type ConfigMaps} from '../resources/config_map/config_maps.js';
-import {K8ClientConfigMaps} from '../k8_client/resources/config_map/k8_client_config_maps.js';
-import {K8ClientContainers} from '../k8_client/resources/container/k8_client_containers.js';
+import {K8ClientConfigMaps} from './resources/config_map/k8_client_config_maps.js';
+import {K8ClientContainers} from './resources/container/k8_client_containers.js';
import {type Containers} from '../resources/container/containers.js';
import {type Contexts} from '../resources/context/contexts.js';
-import {K8ClientContexts} from '../k8_client/resources/context/k8_client_contexts.js';
-import {K8ClientPods} from '../k8_client/resources/pod/k8_client_pods.js';
+import {K8ClientContexts} from './resources/context/k8_client_contexts.js';
+import {K8ClientPods} from './resources/pod/k8_client_pods.js';
import {type Pods} from '../resources/pod/pods.js';
import {type Services} from '../resources/service/services.js';
-import {K8ClientServices} from '../k8_client/resources/service/k8_client_services.js';
+import {K8ClientServices} from './resources/service/k8_client_services.js';
import {type Pvcs} from '../resources/pvc/pvcs.js';
-import {K8ClientPvcs} from '../k8_client/resources/pvc/k8_client_pvcs.js';
+import {K8ClientPvcs} from './resources/pvc/k8_client_pvcs.js';
import {type Leases} from '../resources/lease/leases.js';
-import {K8ClientLeases} from '../k8_client/resources/lease/k8_client_leases.js';
-import {K8ClientNamespaces} from '../k8_client/resources/namespace/k8_client_namespaces.js';
-import {K8ClientIngressClasses} from '../k8_client/resources/ingress_class/k8_client_ingress_classes.js';
+import {K8ClientLeases} from './resources/lease/k8_client_leases.js';
+import {K8ClientNamespaces} from './resources/namespace/k8_client_namespaces.js';
+import {K8ClientIngressClasses} from './resources/ingress_class/k8_client_ingress_classes.js';
import {type IngressClasses} from '../resources/ingress_class/ingress_classes.js';
import {type Secrets} from '../resources/secret/secrets.js';
-import {K8ClientSecrets} from '../k8_client/resources/secret/k8_client_secrets.js';
+import {K8ClientSecrets} from './resources/secret/k8_client_secrets.js';
import {type Ingresses} from '../resources/ingress/ingresses.js';
import {K8ClientIngresses} from './resources/ingress/k8_client_ingresses.js';

32 changes: 24 additions & 8 deletions src/core/profile_manager.ts
@@ -25,12 +25,14 @@
import {NamespaceName} from './kube/resources/namespace/namespace_name.js';
import {InjectTokens} from './dependency_injection/inject_tokens.js';
import {type ConsensusNode} from './model/consensus_node.js';
import {type K8Factory} from './kube/k8_factory.js';

@injectable()
export class ProfileManager {
private readonly logger: SoloLogger;
private readonly configManager: ConfigManager;
private readonly cacheDir: DirPath;
private readonly k8Factory: K8Factory;

private profiles: Map<string, AnyObject>;
private profileFile: Optional<string>;
@@ -39,10 +41,12 @@
@inject(InjectTokens.SoloLogger) logger?: SoloLogger,
@inject(InjectTokens.ConfigManager) configManager?: ConfigManager,
@inject(InjectTokens.CacheDir) cacheDir?: DirPath,
@inject(InjectTokens.K8Factory) k8Factory?: K8Factory,
) {
this.logger = patchInject(logger, InjectTokens.SoloLogger, this.constructor.name);
this.configManager = patchInject(configManager, InjectTokens.ConfigManager, this.constructor.name);
this.cacheDir = path.resolve(patchInject(cacheDir, InjectTokens.CacheDir, this.constructor.name));
this.k8Factory = patchInject(k8Factory, InjectTokens.K8Factory, this.constructor.name);

this.profiles = new Map();
}
@@ -174,12 +178,12 @@
}
}

-  resourcesForConsensusPod(
+  async resourcesForConsensusPod(
    profile: AnyObject,
    consensusNodes: ConsensusNode[],
    nodeAliases: NodeAliases,
    yamlRoot: AnyObject,
-  ): AnyObject {
+  ): Promise<AnyObject> {
if (!profile) throw new MissingArgumentError('profile is required');

const accountMap = getNodeAccountMap(nodeAliases);
@@ -200,13 +204,14 @@
fs.mkdirSync(stagingDir, {recursive: true});
}

-    const configTxtPath = this.prepareConfigTxt(
+    const configTxtPath = await this.prepareConfigTxt(
accountMap,
consensusNodes,
stagingDir,
this.configManager.getFlag(flags.releaseTag),
this.configManager.getFlag(flags.app),
this.configManager.getFlag(flags.chainId),
this.configManager.getFlag(flags.loadBalancerEnabled),
);

for (const flag of flags.nodeConfigFileFlags.values()) {
@@ -321,7 +326,7 @@

// generate the YAML
const yamlRoot = {};
-    this.resourcesForConsensusPod(profile, consensusNodes, nodeAliases, yamlRoot);
+    await this.resourcesForConsensusPod(profile, consensusNodes, nodeAliases, yamlRoot);
this.resourcesForHaProxyPod(profile, yamlRoot);
this.resourcesForEnvoyProxyPod(profile, yamlRoot);
this.resourcesForMinioTenantPod(profile, yamlRoot);
@@ -450,15 +455,17 @@
* @param releaseTagOverride - release tag override
* @param [appName] - the app name (default: HederaNode.jar)
* @param [chainId] - chain ID (298 for local network)
* @param [loadBalancerEnabled] - whether the load balancer is enabled (flag is not set by default)
* @returns the config.txt file path
*/
-  prepareConfigTxt(
+  async prepareConfigTxt(
nodeAccountMap: Map<NodeAlias, string>,
consensusNodes: ConsensusNode[],
destPath: string,
releaseTagOverride: string,
appName = constants.HEDERA_APP_NAME,
chainId = constants.HEDERA_CHAIN_ID,
loadBalancerEnabled: boolean = false,
) {
let releaseTag = releaseTagOverride;
if (!nodeAccountMap || nodeAccountMap.size === 0) {
@@ -471,6 +478,11 @@
throw new IllegalArgumentError(`config destPath does not exist: ${destPath}`, destPath);
}

const configFilePath = path.join(destPath, 'config.txt');
if (fs.existsSync(configFilePath)) {
  fs.unlinkSync(configFilePath);
}

(Codacy Static Code Analysis flagged the two file operations above on src/core/profile_manager.ts lines 482-483: "The application dynamically constructs file or path information.")

// init variables
const internalPort = +constants.HEDERA_NODE_INTERNAL_GOSSIP_PORT;
const externalPort = +constants.HEDERA_NODE_EXTERNAL_GOSSIP_PORT;
@@ -492,7 +504,13 @@
consensusNode.name as NodeAlias,
);

-      const externalIP = consensusNode.fullyQualifiedDomainName;
+      // const externalIP = consensusNode.fullyQualifiedDomainName;
+      const externalIP = await helpers.getExternalAddress(
+        consensusNode,
+        this.k8Factory.getK8(consensusNode.context),
+        loadBalancerEnabled,
+      );

const account = nodeAccountMap.get(consensusNode.name as NodeAlias);

configLines.push(
@@ -507,9 +525,7 @@
configLines.push(`nextNodeId, ${nodeSeq}`);
}

-    const configFilePath = path.join(destPath, 'config.txt');
fs.writeFileSync(configFilePath, configLines.join('\n'));

return configFilePath;
} catch (e: Error | unknown) {
throw new SoloError(
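For orientation, each address-book line written by prepareConfigTxt pairs the internal gossip endpoint with the externally resolved one; with the load balancer enabled, the external field becomes the ingress hostname or IP rather than the in-cluster FQDN. An illustrative line (field order assumed from the Hedera address-book convention, values hypothetical):

address, 0, node1, node1, 1, <internal-ip>, <internal-gossip-port>, <load-balancer-address>, <external-gossip-port>, 0.0.3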
5 changes: 4 additions & 1 deletion test/e2e/commands/account.test.ts
@@ -1,7 +1,7 @@
/**
* SPDX-License-Identifier: Apache-2.0
*/
-import {it, describe, after, before} from 'mocha';
+import {after, before, describe, it} from 'mocha';
import {expect} from 'chai';

import {
Expand Down Expand Up @@ -49,8 +49,11 @@ argv[flags.generateGossipKeys.name] = true;
argv[flags.generateTlsKeys.name] = true;
argv[flags.clusterRef.name] = TEST_CLUSTER;
argv[flags.soloChartVersion.name] = version.SOLO_CHART_VERSION;
argv[flags.loadBalancerEnabled.name] = true;
// set the env variable SOLO_CHARTS_DIR if developer wants to use local Solo charts
argv[flags.chartDirectory.name] = process.env.SOLO_CHARTS_DIR ?? undefined;
// enable load balancer for e2e tests
argv[flags.loadBalancerEnabled.name] = true;

e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefined, undefined, true, bootstrapResp => {
describe('AccountCommand', async () => {
3 changes: 2 additions & 1 deletion test/e2e/commands/network.test.ts
@@ -1,7 +1,7 @@
/**
* SPDX-License-Identifier: Apache-2.0
*/
-import {it, describe, after, before} from 'mocha';
+import {after, before, describe, it} from 'mocha';
import {expect} from 'chai';

import {bootstrapTestVariables, getDefaultArgv, getTmpDir, HEDERA_PLATFORM_VERSION_TAG} from '../../test_util.js';
@@ -37,6 +37,7 @@ describe('NetworkCommand', function networkCommand() {
argv[flags.soloChartVersion.name] = version.SOLO_CHART_VERSION;
argv[flags.force.name] = true;
argv[flags.applicationEnv.name] = applicationEnvFilePath;
argv[flags.loadBalancerEnabled.name] = true;
// set the env variable SOLO_CHARTS_DIR if developer wants to use local Solo charts
argv[flags.chartDirectory.name] = process.env.SOLO_CHARTS_DIR ?? undefined;
argv[flags.quiet.name] = true;