diff --git a/Dockerfile b/Dockerfile index 0db092c2..7cbd284f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,10 +10,16 @@ RUN apt-get update && \ rm -rf /var/lib/apt/lists/* && \ mkdir /var/log/kqueen-api +# install aws dependencies +RUN curl -o heptio-authenticator-aws https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/bin/linux/amd64/heptio-authenticator-aws && \ + chmod +x ./heptio-authenticator-aws && \ + mkdir -p $HOME/bin && \ + cp ./heptio-authenticator-aws $HOME/bin/heptio-authenticator-aws && export PATH=$HOME/bin:$PATH && \ + echo 'export PATH=$HOME/bin:$PATH' >> ~/.bashrc + # copy app COPY . . RUN pip install . - # run app CMD ./entrypoint.sh diff --git a/docs/kqueen.rst b/docs/kqueen.rst index b9374642..81bb0160 100644 --- a/docs/kqueen.rst +++ b/docs/kqueen.rst @@ -409,6 +409,47 @@ Provision a Kubernetes cluster using Google Kubernetes Engine #. Click ``Submit``. #. To track the cluster status, navigate to the KQueen main dashboard. +Provision a Kubernetes cluster using Amazon Elastic Kubernetes Service +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +#. Create your Amazon EKS service role, Amazon EKS Cluster VPC - `Official EKS Quickstart <https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html>`_. +#. Create Amazon EKS IAM Policy - `Creating Amazon EKS IAM Policies <https://docs.aws.amazon.com/eks/latest/userguide/EKS_IAM_user_policies.html>`_. +#. Create Amazon EKS IAM Passrole Policy - `Creating Amazon Passrole Policies <https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html>`_. In policy json-snippet switch all ``ec2`` entities to ``eks``. +#. Create User account with Programmatic Access and attach created policies - `Creating an IAM User in Your AWS Account <https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html>`_. Don't forget to save User Key/Secret Pair. +#. Attach additional policies to User account for Heptio Authentication: + + #. AmazonEC2FullAccess + #. AmazonRoute53FullAccess + #. AmazonS3FullAccess + #. IAMFullAccess + #. AmazonVPCFullAccess + +#. Log in to the KQueen web UI. +#. From the ``Create Provisioner`` tab, select ``Amazon Elastic Kubernetes Service`` and set the + following: + + #.
Set the ``AWS Access Key`` as User Key from step 4. + #. Set the ``AWS Access Secret`` as User Secret from step 4. + #. Set the ``AWS Region`` for your deployments; note that currently Amazon EKS clusters work properly only in ``US West (Oregon) (us-west-2)`` and ``US East (N. Virginia) (us-east-1)``. + +#. In the KQueen web UI, click ``Deploy Cluster``. +#. Select created EKS provisioner. +#. From the ``Deploy Cluster`` tab, select ``Amazon Elastic Kubernetes Service`` provisioner and set the + following: + + #. Set the ``IAM Role ARN`` as Amazon EKS service role from step 1. + #. Set the ``Subnet Id`` and ``Security Group Id`` related to Amazon EKS Cluster VPC from step 1. + #. Specify the cluster requirements. + +#. Click ``Submit``. +#. To track the cluster status, navigate to the KQueen main dashboard. + +.. note:: + + Currently, KQueen does not support Kubernetes workers-management because of the Amazon EKS API restrictions. + To cover the Kubernetes workers use cases, see `Getting started with Amazon EKS <https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html>`_. + + Provision a Kubernetes cluster using Azure Kubernetes Service ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -425,7 +466,7 @@ Provision a Kubernetes cluster using Azure Kubernetes Service tab, verify that the Application has the ``Owner`` role in the resource group. #. Log in to the KQueen web UI. -#. From the ``Create provisioner`` tab, select the AKS engine and set the +#. From the ``Create provisioner`` tab, select the ``AKS engine`` and set the following: #. Set the ``Client ID`` as Application ID from step 3.
diff --git a/entrypoint.sh b/entrypoint.sh index 45bb2b5a..2c72d0f3 100755 --- a/entrypoint.sh +++ b/entrypoint.sh @@ -1,5 +1,7 @@ #!/bin/bash +set -ex +# export additional variables export prometheus_multiproc_dir="$(mktemp -d)" BOOTSTRAP_ADMIN="${BOOTSTRAP_ADMIN:-False}" BOOTSTRAP_ADMIN_USERNAME="${BOOTSTRAP_ADMIN_USERNAME:-admin}" diff --git a/kqueen/engines/__init__.py b/kqueen/engines/__init__.py index 9c8b6102..8e3fa43b 100644 --- a/kqueen/engines/__init__.py +++ b/kqueen/engines/__init__.py @@ -2,5 +2,6 @@ from .manual import ManualEngine from .gce import GceEngine from .aks import AksEngine +from .eks import EksEngine -__all__ = ['JenkinsEngine', 'ManualEngine', 'GceEngine', 'AksEngine'] +__all__ = ['JenkinsEngine', 'ManualEngine', 'GceEngine', 'AksEngine', 'EksEngine'] diff --git a/kqueen/engines/eks.py b/kqueen/engines/eks.py new file mode 100644 index 00000000..3dde52fc --- /dev/null +++ b/kqueen/engines/eks.py @@ -0,0 +1,278 @@ +from kqueen.config import current_config +from kqueen.engines.base import BaseEngine +import boto3 + +import logging +import pkgutil +import yaml + +logger = logging.getLogger('kqueen_api') +config = current_config() + +STATE_MAP = { + 'CREATING': config.get('CLUSTER_PROVISIONING_STATE'), + 'ACTIVE': config.get('CLUSTER_OK_STATE'), + 'DELETING': config.get('CLUSTER_DEPROVISIONING_STATE'), + 'FAILED': config.get('CLUSTER_ERROR_STATE'), + 'UPDATING': config.get('CLUSTER_UPDATING_STATE') +} + + +class EksEngine(BaseEngine): + """ + Amazon Elastic Kubernetes Service + """ + name = 'eks' + verbose_name = 'Amazon Elastic Kubernetes Service' + parameter_schema = { + 'provisioner': { + 'aws_access_key': { + 'type': 'text', + 'label': 'AWS Access Key', + 'order': 0, + 'validators': { + 'required': True + } + }, + 'aws_secret_key': { + 'type': 'text', + 'label': 'AWS Secret Key', + 'order': 1, + 'validators': { + 'required': True + } + }, + 'aws_region': { + 'type': 'select', + 'label': 'AWS Region', + 'order': 2, + 'choices': [ + 
('us-east-1', 'US East (N. Virginia)'), + ('us-east-2', 'US East (Ohio)'), + ('us-west-1', 'US West (N. California)'), + ('us-west-2', 'US West (Oregon)'), + ('ca-central-1', 'Canada (Central)'), + ('eu-central-1', 'EU (Frankfurt)'), + ('eu-west-1', 'EU (Ireland)'), + ('eu-west-2', 'EU (London)'), + ('eu-west-3', 'EU (Paris)'), + ('ap-northeast-1', 'Asia Pacific (Tokyo)'), + ('ap-northeast-2', 'Asia Pacific (Seoul)'), + ('ap-northeast-3', 'Asia Pacific (Osaka-Local)'), + ('ap-southeast-1', 'Asia Pacific (Singapore)'), + ('ap-southeast-2', 'Asia Pacific (Sydney)'), + ('ap-south-1', 'Asia Pacific (Mumbai)'), + ('sa-east-1', 'South America (São Paulo)') + ], + 'validators': { + 'required': True + } + } + }, + 'cluster': { +# 'node_count': { +# TODO currently unsupported due to EKS API restrictions +# 'type': 'integer', +# 'label': 'Node Count', +# 'default': 3, +# 'order': 3, +# 'validators': { +# 'required': True, +# 'min': 1, +# 'number': True +# } +# }, + 'role_arn': { + 'type': 'text', + 'label': 'IAM Role ARN', + 'order': 4, + 'validators': { + 'required': True + } + }, + 'subnet_id': { + # TODO subnetIds list - Amazon EKS requires subnets in at least two Availability Zones. + # TODO subnetIds must be attached to common security-group. 
+ 'type': 'text', + 'label': 'Subnet Id', + 'order': 5, + 'validators': { + 'required': True + } + }, + 'security_group_id': { + # TODO securityGroupIds (list) -- Specify one or more security groups + 'type': 'text', + 'label': 'Security Group Id', + 'order': 6, + 'validators': { + 'required': False + } + } + } + } + + def __init__(self, cluster, **kwargs): + """ + Implementation of :func:`~kqueen.engines.base.BaseEngine.__init__` + """ + # Call parent init to save cluster on self + super(EksEngine, self).__init__(cluster, **kwargs) + # Client initialization + self.aws_access_key = kwargs['aws_access_key'] + self.aws_secret_key = kwargs['aws_secret_key'] + self.aws_region = kwargs['aws_region'] + self.client = self._get_client() + # Cluster settings + self.role_arn = kwargs['role_arn'] + subnets = kwargs['subnet_id'] + security_groups = kwargs.get('security_group_id', '') + + self.subnet_id = subnets.replace(' ', '').split(',') + self.security_group_id = security_groups.replace(' ', '').split(',') + # Get templates + files = self._get_template_files() + self.eks_kubeconfig = files['template.yaml'] + # Cache settings + self.cache_timeout = 5 * 60 + + def _get_template_files(self): + package_name = "kqueen.engines.resources.aws" + files = {} + entities = ['template'] + for f in entities: + files[f + ".yaml"] = pkgutil.get_data(package_name, f + ".yaml") + return files + + def _get_client(self): + """ + Initialize Eks client + """ + client = boto3.client( + 'eks', + region_name=self.aws_region, + aws_access_key_id=self.aws_access_key, + aws_secret_access_key=self.aws_secret_key + ) + return client + + def provision(self, **kwargs): + """ + Implementation of :func:`~kqueen.engines.base.BaseEngine.provision` + """ + try: + response = self.client.create_cluster( + name=self.cluster.id, + roleArn=self.role_arn, + resourcesVpcConfig={ + 'subnetIds': self.subnet_id, + 'securityGroupIds': self.security_group_id + } + ) + + # TODO: check if provisioning response is healthy + 
except Exception as e: + msg = 'Creating cluster {} failed with the following reason:'.format(self.cluster.id) + logger.exception(msg) + return False, e + return True, None + + def deprovision(self, **kwargs): + """ + Implementation of :func:`~kqueen.engines.base.BaseEngine.deprovision` + """ + # test if cluster is considered deprovisioned by the base method + result, error = super(EksEngine, self).deprovision(**kwargs) + if result: + return result, error + try: + self.client.delete_cluster(name=self.cluster.id) + # TODO: check if deprovisioning response is healthy + except Exception as e: + msg = 'Deleting cluster {} failed with the following reason:'.format(self.cluster.id) + logger.exception(msg) + return False, e + return True, None + + def resize(self, node_count, **kwargs): + """ Implement Later """ + msg = 'Resizing cluster for Eks engine is disabled' + return False, msg + + def get_kubeconfig(self): + """ + Implementation of :func:`~kqueen.engines.base.BaseEngine.get_kubeconfig` + """ + # TODO Currently, KQueen can't parse k8s config body due to unsupported user-exec auth through plugins, + # like Heptio, link: https://github.com/kubernetes-client/python/issues/514 + + if not self.cluster.kubeconfig: + cluster = self.client.describe_cluster(name=self.cluster.id) + kubeconfig = {} + if cluster['cluster']['status'] != 'ACTIVE': + return self.cluster.kubeconfig + self.cluster.kubeconfig = yaml.load(self.eks_kubeconfig) + self.cluster.kubeconfig['clusters'][0]['cluster'] = { + 'server': cluster['cluster']['endpoint'], + 'certificate-authority-data': cluster['cluster']['certificateAuthority']['data'] + } + self.cluster.kubeconfig['users'][0]['user']['exec']['args'][2] = cluster['cluster']['name'] + # Set user credentials for Heptio auth + self.cluster.kubeconfig['users'][0]['user']['exec']['env'][0]['value'] = self.aws_access_key + self.cluster.kubeconfig['users'][0]['user']['exec']['env'][1]['value'] = self.aws_secret_key + + # Uncomment following lines in case 
of specifying additional ARN role for heptio auth +# self.cluster.kubeconfig['users'][0]['user']['exec']['args'][4] = cluster['cluster']['roleArn'] +# self.cluster.kubeconfig['users'][0]['user']['exec']['args'][4] = cluster['cluster']['arn'] + self.cluster.save() + return self.cluster.kubeconfig + + def cluster_get(self): + """ + Implementation of :func:`~kqueen.engines.base.BaseEngine.cluster_get` + """ + response = {} + try: + response = self.client.describe_cluster(name=self.cluster.id) + except Exception as e: + msg = 'Fetching data from backend for cluster {} failed with the following reason:'.format(self.cluster.id) + logger.exception(msg) + return {} + state = STATE_MAP.get(response['cluster']['status'], config.get('CLUSTER_UNKNOWN_STATE')) + + key = 'cluster-{}-{}'.format(response['cluster']['name'], response['cluster']['name']) + cluster = { + 'key': key, + 'name': response['cluster']['name'], + 'id': response['cluster']['name'], + 'state': state, + 'metadata': self.cluster.metadata + } + return cluster + + def cluster_list(self): + """Is not needed in Eks""" + return [] + + @classmethod + def engine_status(cls, **kwargs): + try: + aws_access_key = kwargs['aws_access_key'] + aws_secret_key = kwargs['aws_secret_key'] + aws_region = kwargs['aws_region'] + except KeyError: + return config.get('PROVISIONER_ERROR_STATE') + + client = boto3.client( + 'eks', + region_name=aws_region, + aws_access_key_id=aws_access_key, + aws_secret_access_key=aws_secret_key + ) + try: + list(client.list_clusters()) + except Exception: + logger.exception('{} Eks Provisioner validation failed.'.format(cls.name)) + return config.get('PROVISIONER_UNKNOWN_STATE') + + return config.get('PROVISIONER_OK_STATE') diff --git a/kqueen/engines/resources/__init__.py b/kqueen/engines/resources/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/kqueen/engines/resources/aws/__init__.py b/kqueen/engines/resources/aws/__init__.py new file mode 100644 index 00000000..e69de29b 
diff --git a/kqueen/engines/resources/aws/template.yaml b/kqueen/engines/resources/aws/template.yaml new file mode 100644 index 00000000..15ae1ff0 --- /dev/null +++ b/kqueen/engines/resources/aws/template.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +clusters: +- cluster: + server: + certificate-authority-data: + name: kubernetes +contexts: +- context: + cluster: kubernetes + user: aws + name: aws +current-context: aws +kind: Config +preferences: {} +users: +- name: aws + user: + exec: + apiVersion: client.authentication.k8s.io/v1alpha1 + command: heptio-authenticator-aws + env: + - name: "AWS_ACCESS_KEY_ID" + value: "" + - name: "AWS_SECRET_ACCESS_KEY" + value: "" + args: + - "token" + - "-i" + - "" + # - "-r" + # - "" diff --git a/kqueen/kubeapi.py b/kqueen/kubeapi.py index b968e664..6e0bbf7d 100644 --- a/kqueen/kubeapi.py +++ b/kqueen/kubeapi.py @@ -54,7 +54,6 @@ def get_api_client(self): config_dict=kubeconfig, ) kcl.load_and_set(client_config) - return client.ApiClient(configuration=client_config) def get_version(self): diff --git a/setup.py b/setup.py index 65aa4a36..be060163 100644 --- a/setup.py +++ b/setup.py @@ -52,6 +52,7 @@ 'google-auth-httplib2==0.0.3', 'azure', 'azure-mgmt-containerservice', + 'boto3', ], setup_requires=[ 'pytest-runner',