Added eksengine to engines/__init__.py #315
base: master
Commits: f16ed10, 47b4997, 391f91c, 625c540, b016a1d, 6144bcf, 0c0df12
New file: the kubeconfig template (loaded as "eks-kubeconfig" by the engine code below)

@@ -0,0 +1,26 @@
```yaml
apiVersion: v1
clusters:
- cluster:
    server: <endpoint-url>
    certificate-authority-data: <base64-encoded-ca-cert>
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: aws
  name: aws
current-context: aws
kind: Config
preferences: {}
users:
- name: aws
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1alpha1
      command: heptio-authenticator-aws
      args:
      - "token"
      - "-i"
      - "<cluster-name>"
      # - "-r"
      # - "<role-arn>"
```
New file: the EKS engine module (220 lines)

@@ -0,0 +1,220 @@
```python
from kqueen.config import current_config
from kqueen.engines.base import BaseEngine
import boto3

import logging
import yaml


logger = logging.getLogger('kqueen_api')
config = current_config()


STATE_MAP = {
    'CREATING': config.get('CLUSTER_PROVISIONING_STATE'),
    'ACTIVE': config.get('CLUSTER_OK_STATE'),
    'DELETING': config.get('CLUSTER_DEPROVISIONING_STATE'),
    'FAILED': config.get('CLUSTER_ERROR_STATE'),
    'UPDATED': config.get('CLUSTER_UPDATING_STATE')
}


class EksEngine(BaseEngine):
    """
    Amazon Elastic Kubernetes Service
    """
    name = 'eks'
    verbose_name = 'Amazon Elastic Kubernetes Service'
    parameter_schema = {
        'provisioner': {
            'aws_access_key': {
                'type': 'text',
                'label': 'AWS Access Key',
                'order': 0,
                'validators': {
                    'required': True
                }
            },
            'aws_secret_key': {
                'type': 'text',
                'label': 'AWS Secret Key',
                'order': 1,
                'validators': {
                    'required': True
                }
            }
        },
        'cluster': {
            'node_count': {
                'type': 'integer',
                'label': 'Node Count',
                'default': 3,
                'validators': {
                    'required': True,
                    'min': 1,
                    'number': True
                }
            },
            'roleArn': {
```
Review comment: It's better to use snake_case everywhere.
```python
                'type': 'text',
                'label': 'IAM Role ARN',
                'validators': {
                    'required': True
                }
            },
            'subnetid': {
                'type': 'text',
                'label': 'Subnet Id',
```
Review comment: "Subnet(s) Id", since more than one can be chosen. Ideally we should fetch the list of subnets and let the user pick them on the fly, but for now we need at least to parse the string into a list (on the UI side); see the sketch below.
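Following the reviewer's suggestion, a minimal sketch of turning a comma-separated subnet string into a list; `parse_subnet_ids` is a hypothetical helper, not part of the PR:

```python
def parse_subnet_ids(raw):
    """Split a comma-separated subnet string into a clean list.

    >>> parse_subnet_ids('subnet-aaa, subnet-bbb')
    ['subnet-aaa', 'subnet-bbb']
    """
    return [s.strip() for s in raw.split(',') if s.strip()]
```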
```python
                'validators': {
                    'required': True
                }
            },
            'securitygroupid': {
                'type': 'text',
                'label': 'Security Group Id',
                'validators': {
                    'required': True
                }
            }
        }
    }

    def __init__(self, cluster, **kwargs):
        """
        Implementation of :func:`~kqueen.engines.base.BaseEngine.__init__`
        """
        # Call parent init to save cluster on self
        super(EksEngine, self).__init__(cluster, **kwargs)
        # Client initialization
        self.aws_access_key = kwargs.get('aws_access_key', '')
        self.aws_secret_key = kwargs.get('aws_secret_key', '')
        self.client = self._get_client()
        # Cache settings
        self.cache_timeout = 5 * 60

    def _get_client(self):
        """
        Initialize Eks client
        """
        client = boto3.client(
            'eks',
            aws_access_key_id=self.aws_access_key,
            aws_secret_access_key=self.aws_secret_key
        )
        return client

    def provision(self, **kwargs):
        """
        Implementation of :func:`~kqueen.engines.base.BaseEngine.provision`
        """
        # self.name = kwargs.get('name', 'noname')
```
Review comment: ? :)
```python
        try:
            response = self.client.create_cluster(
                name=self.cluster.name,
                roleArn=self.cluster.roleArn,
                resourcesVpcConfig={
                    'subnetIds': [
                        self.cluster.subnetid
                    ],
                    'securityGroupIds': [
                        self.cluster.securitygroupid
                    ]
                }
            )

            self.cluster.metadata['endpoint'] = response['cluster']['endpoint']
            self.cluster.metadata['roleArn'] = response['cluster']['roleArn']
            self.cluster.metadata['status'] = response['cluster']['status']
            self.cluster.metadata['id'] = response['cluster']['name']
            self.cluster.save()
            # TODO: check if provisioning response is healthy
        except Exception as e:
            msg = 'Creating cluster {} failed with the following reason:'.format(self.cluster.id)
            logger.exception(msg)
            return False, msg
```
Review comment: Please return only `e` instead of `msg`, since the combined message gets huge and reads like: "Error occurred with the following reason: Creating cluster failed with the following reason:". A sketch of the suggested pattern follows.
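A minimal sketch of the error handling the reviewer is asking for; `provision_with_short_error` is a hypothetical illustration, not the PR's code:

```python
import logging

logger = logging.getLogger('kqueen_api')

def provision_with_short_error(client, cluster):
    """Log the full traceback, but hand back only the exception text."""
    try:
        client.create_cluster(name=cluster.name)  # call trimmed for illustration
        return True, None
    except Exception as e:
        # The caller already prefixes its own context, so returning str(e)
        # avoids the doubled 'failed with the following reason:' message.
        logger.exception('Creating cluster {} failed'.format(cluster.id))
        return False, str(e)
```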
```python
        return True, None

    def deprovision(self, **kwargs):
        """
        Implementation of :func:`~kqueen.engines.base.BaseEngine.deprovision`
        """
        # test if cluster is considered deprovisioned by the base method
        result, error = super(EksEngine, self).deprovision(**kwargs)
        if result:
            return result, error
        try:
            self.client.delete_cluster(name=self.cluster.metadata['id'])
            # TODO: check if deprovisioning response is healthy
        except Exception as e:
            msg = 'Deleting cluster {} failed with the following reason:'.format(self.cluster.id)
            logger.exception(msg)
            return False, msg
        return True, None

    def resize(self, node_count, **kwargs):
        """ Implement Later """
        msg = 'Resizing cluster for Eks engine is disabled'
        return False, msg

    def get_kubeconfig(self):
        """
        Implementation of :func:`~kqueen.engines.base.BaseEngine.get_kubeconfig`
        """
        if not self.cluster.kubeconfig:
            cluster = self.client.describe_cluster(name=self.cluster.metadata['id'])
            kubeconfig = {}
            if cluster['cluster']['status'] != "ACTIVE":
                return self.cluster.kubeconfig
            self.cluster.kubeconfig = yaml.load(open("eks-kubeconfig").read())
            self.cluster.kubeconfig["clusters"][0]["cluster"] = {
                "server": cluster['endpoint'],
                "certificate-authority-data": cluster['certificateAuthority']['data']
            }
            self.cluster.kubeconfig["users"][0]["user"]["exec"]["args"][2] = cluster['name']
            self.cluster.save()
        return self.cluster.kubeconfig
```
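One bug worth flagging here: boto3's `describe_cluster` nests everything under the `cluster` key (the status check above already reads `cluster['cluster']['status']`), so `cluster['endpoint']`, `cluster['certificateAuthority']`, and `cluster['name']` would raise `KeyError`. A corrected sketch of the method under that assumption, also swapping the bare `yaml.load` for `yaml.safe_load`; not part of the PR:

```python
    def get_kubeconfig(self):
        if not self.cluster.kubeconfig:
            info = self.client.describe_cluster(name=self.cluster.metadata['id'])['cluster']
            if info['status'] != 'ACTIVE':
                return self.cluster.kubeconfig
            # safe_load avoids executing arbitrary YAML tags from the template
            with open('eks-kubeconfig') as f:
                self.cluster.kubeconfig = yaml.safe_load(f)
            self.cluster.kubeconfig['clusters'][0]['cluster'] = {
                'server': info['endpoint'],
                'certificate-authority-data': info['certificateAuthority']['data']
            }
            self.cluster.kubeconfig['users'][0]['user']['exec']['args'][2] = info['name']
            self.cluster.save()
        return self.cluster.kubeconfig
```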
```python
    def cluster_get(self):
        """
        Implementation of :func:`~kqueen.engines.base.BaseEngine.cluster_get`
        """
        response = {}
        try:
            response = self.client.describe_cluster(self.cluster.metadata['id'])
        except Exception as e:
            msg = 'Fetching data from backend for cluster {} failed with the following reason:'.format(self.cluster.metadata['heat_cluster_id'])
```
Review comment: `self.cluster.metadata['id']` (i.e. the message should reference the 'id' key, not 'heat_cluster_id').
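Also worth noting: boto3 client operations accept keyword arguments only, so the positional `describe_cluster` call above would raise a `TypeError` at runtime. With the metadata-key fix the reviewer suggests, the call would read:

```python
response = self.client.describe_cluster(name=self.cluster.metadata['id'])
```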
```python
            logger.exception(msg)
            return {}
        state = STATE_MAP.get(response['cluster']['status'], config.get('CLUSTER_UNKNOWN_STATE'))

        key = 'cluster-{}-{}'.format(response['cluster']['name'], response['cluster']['name'])
```
Review comment: why is the name used two times?
Reply: one of them should be the id, but EKS uses the name as the id at the moment; the only other option is to use the ARN, which is extremely long: arn:aws:eks:us-west-2:012345678910:cluster/prod
```python
        cluster = {
            'key': key,
            'name': response['cluster']['name'],
            'id': response['cluster']['name'],
            'state': state,
            'metadata': self.cluster.metadata
        }
        return cluster

    def cluster_list(self):
        """Is not needed in Eks"""
        return []

    @classmethod
    def engine_status(cls, **kwargs):
        try:
            aws_access_key = kwargs.get('aws_access_key', '')
            aws_secret_key = kwargs.get('aws_secret_key', '')
```
Review comment: did you mean 'aws_secret_access_key'?
```python
        except Exception:
            logger.exception('{} Eks Provisioner validation failed.'.format(cls.name))
            return config.get('PROVISIONER_ERROR_STATE')
        client = boto3.client(
            'eks',
            aws_access_key_id=aws_access_key,
            aws_secret_access_key=aws_secret_key
        )
        try:
            list(client.list_clusters())
```
Review comment: can we use a more lightweight request here? (This request is sent for each cluster, even for the list-all-clusters operation.)
Reply: it seems not; as far as I know, only list_clusters gives a valid check of all the required user IAM roles/policies. I understand it's a heavy request, but we are limited by the small API. (A possible mitigation is sketched below.)
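If the payload is a concern, EKS's `list_clusters` accepts a `maxResults` parameter, which keeps the IAM permission check while capping the response size; a sketch:

```python
# Still exercises eks:ListClusters, but returns at most one cluster name.
# (list_clusters returns a dict, so the surrounding list() call is unnecessary.)
client.list_clusters(maxResults=1)
```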
```python
        except Exception:
            logger.exception('{} Eks Provisioner validation failed.'.format(cls.name))
            return config.get('PROVISIONER_UNKNOWN_STATE')
        return config.get('PROVISIONER_OK_STATE')
```
Review thread on the 'UPDATED' key in STATE_MAP:
Review comment: 'UPDATING'?
Reply: @ekhomyakova I suppose those names come from EKS, but I don't see 'UPDATING' in the list at all, so maybe we can just remove 'UPDATED' from the map.
Reply: as far as I know the engine requires an updating state, so either we leave it or we map it to unknown.