Skip to content

Commit

Permalink
Merge branch 'master' into 46-override-notification-settings
Browse files Browse the repository at this point in the history
  • Loading branch information
wokeGit committed Oct 11, 2017
2 parents 2fcc2b8 + 4089d76 commit 83063f6
Show file tree
Hide file tree
Showing 60 changed files with 4,981 additions and 117 deletions.
21 changes: 21 additions & 0 deletions .argo/git-checkout.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
---
# Argo v1 container template: clones a git repository into /src and checks
# out a specific commit, exporting the resulting tree as the CODE artifact.
type: container
version: 1
name: git-checkout
description: Checks out a source repository to /src
resources:
  mem_mib: 500      # modest footprint: this container only runs git
  cpu_cores: 0.1
image: indiehosters/git
command: ["bash", "-c"]
# %%...%% placeholders are substituted by the Argo engine before the
# container starts; REPO/COMMIT default to the triggering session's values.
args: ["git clone %%inputs.parameters.REPO%% /src && cd /src && git checkout %%inputs.parameters.COMMIT%%"]
inputs:
  parameters:
    COMMIT:
      default: "%%session.commit%%"
    REPO:
      default: "%%session.repo%%"
outputs:
  artifacts:
    CODE:
      path: /src    # the checked-out working tree
111 changes: 111 additions & 0 deletions .argo/lite-ci.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,111 @@
---
# Top-level release workflow: builds Argo Lite (API + UI) via the CI
# workflow, then publishes the combined Docker image to a registry.
type: workflow
version: 1
name: Argo Lite Release

inputs:
  parameters:
    COMMIT:
      default: "%%session.commit%%"   # commit that triggered this session
    REPO:
      default: "%%session.repo%%"
    IMAGE_URL:
      default: "argoproj/argo-lite:staging"   # target image name:tag
    REG_USER:
      # Registry credentials are resolved from the Argo secret store.
      default: "%%[email protected]%%"
    REG_PASSWORD:
      default: "%%[email protected]%%"

steps:
  - build:
      template: Argo Lite CI
  - publish:
      template: lite-publish
      arguments:
        # Wire the CI build outputs into the publish step.
        artifacts.API: "%%steps.build.outputs.artifacts.API%%"
        artifacts.UI: "%%steps.build.outputs.artifacts.UI%%"
        parameters.IMAGE_URL: "%%inputs.parameters.IMAGE_URL%%"
        parameters.REG_USER: "%%inputs.parameters.REG_USER%%"
        parameters.REG_PASSWORD: "%%inputs.parameters.REG_PASSWORD%%"

---
# CI workflow: checks out the source, then builds the Lite API server and
# the UI. build-api and build-ui live in the same step-group entry, so
# Argo runs them in parallel after checkout completes.
type: workflow
version: 1
name: Argo Lite CI

inputs:
  parameters:
    COMMIT:
      default: "%%session.commit%%"
    REPO:
      default: "%%session.repo%%"

steps:
  - checkout:
      template: git-checkout
  - build-api:
      template: lite-build-api
      arguments:
        artifacts.CODE: "%%steps.checkout.outputs.artifacts.CODE%%"
    build-ui:   # same list item as build-api -> runs in parallel with it
      template: gui-build
      arguments:
        artifacts.CODE: "%%steps.checkout.outputs.artifacts.CODE%%"

outputs:
  artifacts:
    UI:
      from: "%%steps.build-ui.outputs.artifacts.CODE%%"
    API:
      from: "%%steps.build-api.outputs.artifacts.CODE%%"

---
# Container template: installs JS dependencies and builds the Argo Lite
# API server in place; the mutated /src tree is re-exported as CODE.
type: container
version: 1
name: lite-build-api

inputs:
  artifacts:
    CODE:
      path: /src    # source tree produced by git-checkout

outputs:
  artifacts:
    CODE:
      path: /src    # same path: build output lands inside the source tree

image: node:6.3.1
resources:
  mem_mib: 1024     # npm/yarn installs need headroom
  cpu_cores: 0.1
command: ["sh", "-c"]
# NOTE(review): the args scalar is unquoted; it parses today, but quoting
# would be safer if the command ever gains YAML-significant characters.
args: [cd /src/lite && npm install -g yarn && yarn install && npm run build]

---
# Container template: copies the built UI dist into the API tree, builds
# the final Argo Lite Docker image, and pushes it to the registry.
type: container
version: 1
name: lite-publish

inputs:
  parameters:
    IMAGE_URL:      # full image name:tag to build and push
    REG_USER:       # registry credentials
    REG_PASSWORD:
  artifacts:
    UI:
      path: /ui     # built UI tree (from gui-build)
    API:
      path: /api    # built API tree (from lite-build-api)

image: docker
resources:
  mem_mib: 512
  cpu_cores: 0.1
command: ["sh", "-c"]
# NOTE(review): $REGISTRY is expanded by the shell at run time and must be
# supplied by the execution environment -- confirm it is always set.
args: [
    docker login -u %%inputs.parameters.REG_USER%% -p %%inputs.parameters.REG_PASSWORD%% $REGISTRY &&
    cd /api/lite && cp -r /ui/saas/axops/src/ui/dist/ ./dist/ui && docker build -t %%inputs.parameters.IMAGE_URL%% . && docker push %%inputs.parameters.IMAGE_URL%%
]
annotations:
  # Enables docker-in-docker with dedicated graph storage for the build.
  ax_ea_docker_enable: '{"graph-storage-size": "10Gi", "cpu_cores":0.1, "mem_mib":200}'
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ Argo is an open source container-native workflow engine for developing and runni

### Step 1: Download and install Argo

https://argoproj.github.io/argo-site/get-started/installation
https://argoproj.github.io/get-started/installation

### Step 2: Create and submit jobs

Expand Down
2 changes: 0 additions & 2 deletions build/ax/build/saas.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,8 +50,6 @@ def build_one(self, container, **kwargs):
build_script = os.path.join(path, "build.sh")
if not os.path.isfile(build_script):
build_script = None
if self.debug and build_script:
build_script += " --debug"
ret = super(SaasBuilder, self).build_one(container, build_script=build_script, **kwargs)
shutil.rmtree(build_dir, ignore_errors=True)
return ret
Expand Down
33 changes: 27 additions & 6 deletions common/python/ax/cloud/aws/aws_s3.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
This is organized at bucket level.
"""

import os
import logging
import json
import time
Expand Down Expand Up @@ -101,8 +102,12 @@ def __init__(self, bucket_name, aws_profile=None, region=None):
logger.info("Using region %s for bucket %s", self._region, self._name)

session = boto3.Session(profile_name=aws_profile, region_name=self._region)
self._s3 = session.resource("s3")
self._s3_client = session.client("s3")
self._s3 = session.resource("s3", aws_access_key_id=os.environ.get("ARGO_S3_ACCESS_KEY_ID", None),
aws_secret_access_key=os.environ.get("ARGO_S3_ACCESS_KEY_SECRET", None),
endpoint_url=os.environ.get("ARGO_S3_ENDPOINT", None))
self._s3_client = session.client("s3", aws_access_key_id=os.environ.get("ARGO_S3_ACCESS_KEY_ID", None),
aws_secret_access_key=os.environ.get("ARGO_S3_ACCESS_KEY_SECRET", None),
endpoint_url=os.environ.get("ARGO_S3_ENDPOINT", None))
self._bucket = self._s3.Bucket(self._name)
self._policy = self._s3.BucketPolicy(self._name)

Expand All @@ -112,6 +117,20 @@ def __repr__(self):
def get_bucket_name(self):
    """Return the name of the S3 bucket this wrapper manages."""
    bucket_name = self._name
    return bucket_name

@staticmethod
def supports_encryption():
    """Whether the backing object store supports server-side encryption.

    Encryption is assumed to be available everywhere except s3proxy,
    which is detected by the presence of a custom S3 endpoint.
    TODO: replace with check for cloud != minikube.
    """
    custom_endpoint = os.getenv("ARGO_S3_ENDPOINT")
    return not custom_endpoint

@staticmethod
def supports_signed_url():
    """Whether pre-signed URLs can be used for this object store.

    Signed URLs are only used against real AWS S3; AWS is assumed
    whenever no custom S3 endpoint has been configured.
    TODO: replace with check for cloud == aws.
    """
    custom_endpoint = os.getenv("ARGO_S3_ENDPOINT")
    return not custom_endpoint


@retry(
retry_on_exception=head_bucket_retry,
wait_exponential_multiplier=1000,
Expand All @@ -123,8 +142,10 @@ def _do_get_region(start_region):
s3 = boto3.Session(
profile_name=self._aws_profile,
region_name=start_region
).client("s3", config=Config(signature_version='s3v4'))

).client("s3", aws_access_key_id=os.environ.get("ARGO_S3_ACCESS_KEY_ID", None),
aws_secret_access_key=os.environ.get("ARGO_S3_ACCESS_KEY_SECRET", None),
endpoint_url=os.environ.get("ARGO_S3_ENDPOINT", None),
config=Config(signature_version='s3v4'))
logger.debug("Finding region for bucket %s from with initial region %s", self._name, start_region)
try:
response = s3.head_bucket(Bucket=self._name)
Expand Down Expand Up @@ -334,11 +355,11 @@ def get_object(self, key, **kwargs):
http://boto3.readthedocs.org/en/latest/reference/services/s3.html#S3.Object.get
:return: actual object or None
"""
if not self.exists():
return None
try:
return self._s3.Object(self._name, key).get(**kwargs)["Body"].read().decode("utf-8")
except Exception as e:
if not self.exists():
return None
if "NoSuchKey" not in str(e):
raise
return None
Expand Down
8 changes: 6 additions & 2 deletions common/python/ax/cluster_management/app/cluster_installer.py
Original file line number Diff line number Diff line change
Expand Up @@ -490,7 +490,7 @@ def _generate_raw_cluster_config_dict_gcp(self, config):
config["cloud"]["trusted_cidr"] = self._cfg.trusted_cidrs
return config

def update_and_save_config(self, cluster_bucket=None):
def update_and_save_config(self, cluster_bucket=None, bucket_endpoint=None, cloud_provider=None):
"""
Update the config to use the given bucket and upload cluster_config and kubeconfig
to the given bucket.
Expand All @@ -499,6 +499,8 @@ def update_and_save_config(self, cluster_bucket=None):
self._cluster_config.set_config(raw_cluster_config_dict)
self._cluster_config.set_cluster_provider(ClusterProvider.USER)
self._cluster_config.set_support_object_store_name(cluster_bucket)
self._cluster_config.set_bucket_endpoint(bucket_endpoint)
self._cluster_config.set_provider(cloud_provider)

# Save config file to s3.
self._cluster_config.save_config()
Expand All @@ -518,7 +520,9 @@ def post_run(self):
return

def run(self):
self._ci_installer.update_and_save_config(cluster_bucket=self._cfg.get_cluster_bucket())
self._ci_installer.update_and_save_config(cluster_bucket=self._cfg.get_cluster_bucket(),
bucket_endpoint=self._cfg.bucket_endpoint,
cloud_provider=self._cfg.cloud_provider)

cluster_dns, username, password = self._ci_installer.install_and_run_platform()
self._ci_installer.post_install()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -344,7 +344,7 @@ def __init__(self, cfg):
cfg.vpc_id = None
cfg.vpc_cidr_base = None
cfg.subnet_mask_size = None
cfg.trusted_cidrs = None
cfg.trusted_cidrs = ClusterInstallDefaults.TRUSTED_CIDR
cfg.user_on_demand_nodes = None
cfg.spot_instances_option = "none"
cfg.cluster_autoscaling_scan_interval = None
Expand All @@ -356,6 +356,7 @@ def __init__(self, cfg):
if cfg.cloud_provider == "minikube":
self.service_manifest_root = "/ax/config/service/argo-wfe"
self.platform_bootstrap_config = "/ax/config/service/config/argo-wfe-platform-bootstrap.cfg"
Cloud(target_cloud="aws")
else:
self.service_manifest_root = "/ax/config/service/argo-all"
self.platform_bootstrap_config = "/ax/config/service/config/argo-all-platform-bootstrap.cfg"
Expand Down Expand Up @@ -435,5 +436,9 @@ def add_platform_only_flags(parser):

# Add bucket
parser.add_argument("--cluster-bucket", default=None, required=True, help="S3 complaint bucket to use")
parser.add_argument("--bucket-endpoint", default=None, help="HTTP Endpoint for the cluster-bucket")
parser.add_argument("--access-key", default=None, help="Access key for accessing the bucket")
parser.add_argument("--secret-key", default=None, help="Secret key for accessing the bucket")

# Add kubeconfig
parser.add_argument("--kubeconfig", default=None, required=True, help="Kubeconfig file for the cluster")
54 changes: 50 additions & 4 deletions common/python/ax/cluster_management/argo_cluster_manager.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,13 +11,20 @@

from ax.cloud import Cloud
from ax.cloud.aws import SecurityToken
from ax.platform.cluster_infra import get_host_ip
from ax.util.const import COLOR_NORM, COLOR_RED
from .app import ClusterInstaller, ClusterPauser, ClusterResumer, ClusterUninstaller, ClusterUpgrader, \
CommonClusterOperations, PlatformOnlyInstaller
from .app.options import add_install_flags, add_platform_only_flags, ClusterInstallConfig, add_pause_flags, ClusterPauseConfig, \
add_restart_flags, PlatformOnlyInstallConfig, ClusterRestartConfig, add_uninstall_flags, ClusterUninstallConfig, \
add_upgrade_flags, ClusterUpgradeConfig, add_misc_flags, ClusterMiscOperationConfig

import subprocess
import requests
import time

from ax.kubernetes.client import KubernetesApiClient

logger = logging.getLogger(__name__)


Expand Down Expand Up @@ -130,19 +137,58 @@ def upgrade(self, args):
self._ensure_customer_id(upgrade_config.cloud_profile)
ClusterUpgrader(upgrade_config).start()

def _set_env_if_present(self, args):
    """Export optional CLI arguments into the process environment.

    Each attribute below is copied into its environment variable only
    when it exists on ``args`` and is a string; missing or ``None``
    values are skipped, mirroring the optional nature of these flags.

    Replaces four copies of a broad ``try/except Exception: pass`` that
    silently swallowed every error around each assignment.

    :param args: parsed argparse namespace (attributes may be absent).
    """
    env_to_attr = {
        "AX_AWS_REGION": "cloud_region",
        "ARGO_S3_ACCESS_KEY_ID": "access_key",
        "ARGO_S3_ACCESS_KEY_SECRET": "secret_key",
        "ARGO_S3_ENDPOINT": "bucket_endpoint",
    }
    for env_name, attr_name in env_to_attr.items():
        value = getattr(args, attr_name, None)
        # os.environ only accepts strings; skipping non-strings keeps the
        # original "absent or None means do nothing" behavior explicit.
        if isinstance(value, str):
            os.environ[env_name] = value

def _get_s3_proxy_port(self, kubeconfig):
    """Look up the NodePort of the 's3proxy' service in the default
    namespace, or return None when the service is not deployed.

    :param kubeconfig: path to the kubeconfig for the target cluster.
    """
    client = KubernetesApiClient(config_file=kubeconfig)
    services = client.api.list_namespaced_service("default")
    for svc in services.items:
        if svc.metadata.name != "s3proxy":
            continue
        # First (and only) port of the service carries the NodePort.
        return svc.spec.ports[0].node_port
    return None

def _get_s3_proxy_endpoint(self, kubeconfig):
    """Build the HTTP endpoint URL of the in-cluster s3proxy service.

    :param kubeconfig: path to the kubeconfig for the target cluster.
    :return: URL string of the form "http://<host>:<node_port>".
    :raises ValueError: if the s3proxy service is not deployed.
    """
    host = get_host_ip(kubeconfig)
    port = self._get_s3_proxy_port(kubeconfig)
    if port is None:
        # Fail loudly rather than with the original's opaque TypeError
        # from concatenating None into the URL.
        raise ValueError("s3proxy service not found in namespace 'default'")
    # BUG FIX: node_port is an int; the original
    # "http://" + host + ":" + port raised TypeError (str + int).
    return "http://{}:{}".format(host, port)

def install_platform_only(self, args):
logger.info("Installing platform only ...")

os.environ["AX_CUSTOMER_ID"] = "user-customer-id"
os.environ["ARGO_LOG_BUCKET_NAME"] = args.cluster_bucket
os.environ["ARGO_DATA_BUCKET_NAME"] = args.cluster_bucket
os.environ["ARGO_KUBE_CONFIG_PATH"] = args.kubeconfig
os.environ["AX_TARGET_CLOUD"] = Cloud.CLOUD_AWS

try:
os.environ["AX_AWS_REGION"] = args.cloud_region
except Exception:
pass
if args.cloud_provider == "minikube":
s3_proxy_present = self._get_s3_proxy_port(args.kubeconfig) != None
if not s3_proxy_present:
# Install s3_proxy
args.bucket_endpoint = self._get_s3_proxy_endpoint(args.kubeconfig)
# Create bucket

self._set_env_if_present(args)
platform_install_config = PlatformOnlyInstallConfig(cfg=args)
PlatformOnlyInstaller(platform_install_config).run()
return
Expand Down
Loading

0 comments on commit 83063f6

Please sign in to comment.