diff --git a/tests/e2e/go.mod b/tests/e2e/go.mod
index 69442ca7457a6..d87f8e26a2cf9 100644
--- a/tests/e2e/go.mod
+++ b/tests/e2e/go.mod
@@ -8,6 +8,7 @@ replace k8s.io/kops => ../../.
 replace k8s.io/client-go => k8s.io/client-go v0.24.2
 
 require (
+	github.com/aws/aws-sdk-go v1.44.283
 	github.com/blang/semver/v4 v4.0.0
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
 	github.com/octago/sflags v0.2.0
@@ -35,7 +36,6 @@ require (
 	github.com/StackExchange/wmi v1.2.1 // indirect
 	github.com/acomagu/bufpipe v1.0.3 // indirect
 	github.com/apparentlymart/go-cidr v1.1.0 // indirect
-	github.com/aws/aws-sdk-go v1.44.283 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver v3.5.1+incompatible // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
diff --git a/tests/e2e/kubetest2-kops/aws/s3.go b/tests/e2e/kubetest2-kops/aws/s3.go
new file mode 100644
index 0000000000000..77a87139cab5a
--- /dev/null
+++ b/tests/e2e/kubetest2-kops/aws/s3.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package aws
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/credentials"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/sts"
+	"k8s.io/klog/v2"
+)
+
+// We need to pick some region to query the AWS APIs through, even if we are not running on AWS.
+const defaultRegion = "us-east-2"
+
+type awsClient struct {
+	sts *sts.STS
+	s3  *s3.S3
+}
+
+func newAWSClient(ctx context.Context, creds *credentials.Credentials) (*awsClient, error) {
+	awsConfig := aws.NewConfig().WithRegion(defaultRegion).WithUseDualStack(true)
+	awsConfig = awsConfig.WithCredentialsChainVerboseErrors(true)
+	if creds != nil {
+		awsConfig = awsConfig.WithCredentials(creds)
+	}
+
+	awsSession, err := session.NewSessionWithOptions(session.Options{
+		Config:            *awsConfig,
+		SharedConfigState: session.SharedConfigEnable,
+	})
+	if err != nil {
+		return nil, fmt.Errorf("error starting new AWS session: %v", err)
+	}
+
+	return &awsClient{
+		sts: sts.New(awsSession, awsConfig),
+		s3:  s3.New(awsSession, awsConfig),
+	}, nil
+}
+
+// AWSBucketName constructs a bucket name that is unique to the AWS account.
+func AWSBucketName(ctx context.Context, creds *credentials.Credentials) (string, error) {
+	client, err := newAWSClient(ctx, creds)
+	if err != nil {
+		return "", err
+	}
+
+	callerIdentity, err := client.sts.GetCallerIdentity(&sts.GetCallerIdentityInput{})
+	if err != nil {
+		return "", fmt.Errorf("error getting AWS caller identity from STS: %w", err)
+	}
+	bucket := "kops-test-" + aws.StringValue(callerIdentity.Account)
+	return bucket, nil
+}
+
+// EnsureAWSBucket creates a bucket if it does not exist in the account.
+// If a different account has already created the bucket, that is treated as an error to prevent "preimage" attacks.
+func EnsureAWSBucket(ctx context.Context, creds *credentials.Credentials, bucketName string) error {
+	// These don't need to be in the same region, so we pick a region arbitrarily
+	location := "us-east-2"
+
+	client, err := newAWSClient(ctx, creds)
+	if err != nil {
+		return err
+	}
+
+	// Note that this lists only our buckets, so we know that someone else hasn't created the bucket
+	buckets, err := client.s3.ListBucketsWithContext(ctx, &s3.ListBucketsInput{})
+	if err != nil {
+		return fmt.Errorf("error listing buckets: %w", err)
+	}
+
+	var existingBucket *s3.Bucket
+	for _, bucket := range buckets.Buckets {
+		if aws.StringValue(bucket.Name) == bucketName {
+			existingBucket = bucket
+		}
+	}
+
+	if existingBucket == nil {
+		klog.Infof("creating S3 bucket s3://%s", bucketName)
+		if _, err := client.s3.CreateBucketWithContext(ctx, &s3.CreateBucketInput{
+			Bucket: &bucketName,
+			CreateBucketConfiguration: &s3.CreateBucketConfiguration{
+				LocationConstraint: &location,
+			},
+		}); err != nil {
+			return fmt.Errorf("error creating bucket s3://%v: %w", bucketName, err)
+		}
+	}
+
+	return nil
+}
+
+// DeleteAWSBucket deletes an AWS bucket.
+func DeleteAWSBucket(ctx context.Context, creds *credentials.Credentials, bucketName string) error {
+	client, err := newAWSClient(ctx, creds)
+	if err != nil {
+		return err
+	}
+
+	klog.Infof("deleting S3 bucket s3://%s", bucketName)
+	if _, err := client.s3.DeleteBucketWithContext(ctx, &s3.DeleteBucketInput{Bucket: &bucketName}); err != nil {
+		return fmt.Errorf("error deleting bucket: %w", err)
+	}
+	return nil
+}
diff --git a/tests/e2e/kubetest2-kops/deployer/boskos.go b/tests/e2e/kubetest2-kops/deployer/boskos.go
new file mode 100644
index 0000000000000..c944feb0d8cc6
--- /dev/null
+++ b/tests/e2e/kubetest2-kops/deployer/boskos.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package deployer
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"time"
+
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/boskos/client"
+	"sigs.k8s.io/boskos/common"
+	"sigs.k8s.io/kubetest2/pkg/boskos"
+)
+
+type boskosHelper struct {
+	// boskos holds the client we use for boskos communication.
+	boskos *client.Client
+
+	// this channel serves as a signal channel for the boskos heartbeat goroutine
+	// so that it can be explicitly closed
+	boskosHeartbeatClose chan struct{}
+
+	// resources tracks acquired resources so they can be freed in Cleanup.
+	resources []*common.Resource
+}
+
+func (h *boskosHelper) Acquire(ctx context.Context, resourceType string) (*common.Resource, error) {
+	if h.boskos == nil {
+		h.boskosHeartbeatClose = make(chan struct{})
+
+		boskosURL := os.Getenv("BOSKOS_HOST")
+		if boskosURL == "" {
+			boskosURL = "http://boskos.test-pods.svc.cluster.local."
+		}
+		boskosClient, err := boskos.NewClient(boskosURL)
+		if err != nil {
+			return nil, fmt.Errorf("failed to make boskos client for %q: %w", boskosURL, err)
+		}
+		h.boskos = boskosClient
+	}
+
+	resource, err := boskos.Acquire(
+		h.boskos,
+		resourceType,
+		5*time.Minute,
+		5*time.Minute,
+		h.boskosHeartbeatClose,
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get %q resource from boskos: %w", resourceType, err)
+	}
+	h.resources = append(h.resources, resource)
+
+	return resource, nil
+}
+
+// Cleanup releases any resources acquired from boskos
+func (h *boskosHelper) Cleanup(ctx context.Context) error {
+	if h.boskos != nil {
+		var resourceNames []string
+		for _, resource := range h.resources {
+			klog.V(2).Infof("releasing boskos resource %v %q", resource.Type, resource.Name)
+			resourceNames = append(resourceNames, resource.Name)
+		}
+		err := boskos.Release(
+			h.boskos,
+			resourceNames,
+			h.boskosHeartbeatClose,
+		)
+		if err != nil {
+			return fmt.Errorf("failed to release boskos resources %v: %w", resourceNames, err)
+		}
+	}
+
+	return nil
+}
diff --git a/tests/e2e/kubetest2-kops/deployer/common.go b/tests/e2e/kubetest2-kops/deployer/common.go
index 5d42d4c568879..1f67880f1ceaf 100644
--- a/tests/e2e/kubetest2-kops/deployer/common.go
+++ b/tests/e2e/kubetest2-kops/deployer/common.go
@@ -17,30 +17,31 @@ limitations under the License.
 package deployer
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"os"
 	"path"
 	"path/filepath"
 	"strings"
-	"time"
 
+	"github.com/aws/aws-sdk-go/aws/credentials"
 	"k8s.io/klog/v2"
+	"k8s.io/kops/tests/e2e/kubetest2-kops/aws"
 	"k8s.io/kops/tests/e2e/kubetest2-kops/gce"
 	"k8s.io/kops/tests/e2e/pkg/kops"
 	"k8s.io/kops/tests/e2e/pkg/target"
 	"k8s.io/kops/tests/e2e/pkg/util"
-	"sigs.k8s.io/kubetest2/pkg/boskos"
 )
 
 func (d *deployer) init() error {
 	var err error
-	d.doInit.Do(func() { err = d.initialize() })
+	d.doInit.Do(func() { err = d.initialize(context.TODO()) })
 	return err
 }
 
 // initialize should only be called by init(), behind a sync.Once
-func (d *deployer) initialize() error {
+func (d *deployer) initialize(ctx context.Context) error {
 	if d.commonOptions.ShouldBuild() {
 		if err := d.verifyBuildFlags(); err != nil {
 			return fmt.Errorf("init failed to check build flags: %v", err)
@@ -67,6 +68,27 @@ func (d *deployer) initialize() error {
 
 	switch d.CloudProvider {
 	case "aws":
+		if d.BoskosResourceType != "" {
+			klog.V(1).Info("acquiring AWS credentials from Boskos")
+
+			resource, err := d.boskos.Acquire(ctx, d.BoskosResourceType)
+			if err != nil {
+				return fmt.Errorf("init failed to get resource %q from boskos: %w", d.BoskosResourceType, err)
+			}
+			klog.Infof("got AWS account %q from boskos", resource.Name)
+
+			accessKeyIDObj, ok := resource.UserData.Load("access-key-id")
+			if !ok {
+				return fmt.Errorf("access-key-id not found in boskos resource %q", resource.Name)
+			}
+			secretAccessKeyObj, ok := resource.UserData.Load("secret-access-key")
+			if !ok {
+				return fmt.Errorf("secret-access-key not found in boskos resource %q", resource.Name)
+			}
+			d.awsCredentials = credentials.NewStaticCredentials(accessKeyIDObj.(string), secretAccessKeyObj.(string), "")
+			d.createStateStoreBucket = true
+		}
+
 		if d.SSHPrivateKeyPath == "" || d.SSHPublicKeyPath == "" {
 			publicKeyPath, privateKeyPath, err := util.CreateSSHKeyPair(d.ClusterName)
 			if err != nil {
@@ -87,21 +109,10 @@ func (d *deployer) initialize() error {
 		if d.GCPProject == "" {
 			klog.V(1).Info("No GCP project provided, acquiring from Boskos")
 
-			boskosClient, err := boskos.NewClient("http://boskos.test-pods.svc.cluster.local.")
+			resourceType := "gce-project"
+			resource, err := d.boskos.Acquire(ctx, resourceType)
 			if err != nil {
-				return fmt.Errorf("failed to make boskos client: %s", err)
-			}
-			d.boskos = boskosClient
-
-			resource, err := boskos.Acquire(
-				d.boskos,
-				"gce-project",
-				5*time.Minute,
-				5*time.Minute,
-				d.boskosHeartbeatClose,
-			)
-			if err != nil {
-				return fmt.Errorf("init failed to get project from boskos: %s", err)
+				return fmt.Errorf("init failed to get %q resource from boskos: %w", resourceType, err)
 			}
 			d.GCPProject = resource.Name
 			klog.V(1).Infof("Got project %s from boskos", d.GCPProject)
@@ -114,10 +125,14 @@ func (d *deployer) initialize() error {
 				d.SSHPrivateKeyPath = privateKey
 				d.SSHPublicKeyPath = publicKey
 			}
-			d.createBucket = true
+			d.createStateStoreBucket = true
 		}
 	}
 
+	if err := d.initStateStore(ctx); err != nil {
+		return err
+	}
+
 	if d.SSHUser == "" {
 		d.SSHUser = os.Getenv("KUBE_SSH_USER")
 	}
@@ -178,7 +193,7 @@ func (d *deployer) env() []string {
 		vars = append(vars, []string{
 			fmt.Sprintf("PATH=%v", os.Getenv("PATH")),
 			fmt.Sprintf("HOME=%v", os.Getenv("HOME")),
-			fmt.Sprintf("KOPS_STATE_STORE=%v", d.stateStore()),
+			fmt.Sprintf("KOPS_STATE_STORE=%v", d.stateStore),
			fmt.Sprintf("KOPS_FEATURE_FLAGS=%v", d.featureFlags()),
 			"KOPS_RUN_TOO_NEW_VERSION=1",
 		}...)
@@ -201,6 +216,25 @@
 		// Recognized by the e2e framework
 		// https://github.com/kubernetes/kubernetes/blob/a750d8054a6cb3167f495829ce3e77ab0ccca48e/test/e2e/framework/ssh/ssh.go#L59-L62
 		vars = append(vars, fmt.Sprintf("KUBE_SSH_KEY_PATH=%v", d.SSHPrivateKeyPath))
+
+		if d.awsCredentials != nil {
+			credentials, err := d.awsCredentials.Get()
+			if err != nil {
+				klog.Fatalf("error getting aws credentials: %v", err)
+			}
+			if credentials.AccessKeyID != "" {
+				klog.Infof("setting AWS_ACCESS_KEY_ID")
+				vars = append(vars, fmt.Sprintf("AWS_ACCESS_KEY_ID=%v", credentials.AccessKeyID))
+			} else {
+				klog.Warningf("AWS credentials configured but AWS_ACCESS_KEY_ID was empty")
+			}
+			if credentials.SecretAccessKey != "" {
+				klog.Infof("setting AWS_SECRET_ACCESS_KEY")
+				vars = append(vars, fmt.Sprintf("AWS_SECRET_ACCESS_KEY=%v", credentials.SecretAccessKey))
+			} else {
+				klog.Warningf("AWS credentials configured but AWS_SECRET_ACCESS_KEY was empty")
+			}
+		}
 	} else if d.CloudProvider == "digitalocean" {
 		// Pass through some env vars if set
 		for _, k := range []string{"DIGITALOCEAN_ACCESS_TOKEN", "S3_ACCESS_KEY_ID", "S3_SECRET_ACCESS_KEY"} {
@@ -258,22 +292,52 @@ func defaultClusterName(cloudProvider string) (string, error) {
 	return fmt.Sprintf("e2e-%s.%s", jobName, suffix), nil
 }
 
-// stateStore returns the kops state store to use
-// defaulting to values used in prow jobs
-func (d *deployer) stateStore() string {
+// initStateStore initializes the kops state store to use
+// defaulting to values used in prow jobs,
+// but creating a bucket if we are using a dynamic bucket.
+func (d *deployer) initStateStore(ctx context.Context) error {
 	ss := os.Getenv("KOPS_STATE_STORE")
-	if ss == "" {
-		switch d.CloudProvider {
-		case "aws":
-			ss = "s3://k8s-kops-prow"
-		case "gce":
-			d.createBucket = true
+
+	switch d.CloudProvider {
+	case "aws":
+		if d.createStateStoreBucket {
+			bucketName, err := aws.AWSBucketName(ctx, d.awsCredentials)
+			if err != nil {
+				return fmt.Errorf("error building aws bucket name: %w", err)
+			}
+
+			if err := aws.EnsureAWSBucket(ctx, d.awsCredentials, bucketName); err != nil {
+				return err
+			}
+
+			ss = "s3://" + bucketName
+		} else {
+			if ss == "" {
+				ss = "s3://k8s-kops-prow"
+			}
+		}
+	case "gce":
+		if d.createStateStoreBucket {
 			ss = "gs://" + gce.GCSBucketName(d.GCPProject)
-		case "digitalocean":
-			ss = "do://e2e-kops-space"
+			if err := gce.EnsureGCSBucket(ss, d.GCPProject); err != nil {
+				return err
+			}
+		}
+	case "digitalocean":
+		ss = "do://e2e-kops-space"
+
+	default:
+		if d.createStateStoreBucket {
+			return fmt.Errorf("bucket creation not implemented for cloud %q", d.CloudProvider)
 		}
 	}
-	return ss
+
+	if ss == "" {
+		return fmt.Errorf("cannot determine KOPS_STATE_STORE")
+	}
+
+	d.stateStore = ss
+	return nil
 }
 
 // the default is $ARTIFACTS if set, otherwise ./_artifacts
diff --git a/tests/e2e/kubetest2-kops/deployer/deployer.go b/tests/e2e/kubetest2-kops/deployer/deployer.go
index 5ad0289636228..c869f1cc6f387 100644
--- a/tests/e2e/kubetest2-kops/deployer/deployer.go
+++ b/tests/e2e/kubetest2-kops/deployer/deployer.go
@@ -22,13 +22,13 @@ import (
 	"sync"
 	"time"
 
+	"github.com/aws/aws-sdk-go/aws/credentials"
 	"github.com/octago/sflags/gen/gpflag"
 	"github.com/spf13/pflag"
 	"k8s.io/klog/v2"
 
 	"k8s.io/kops/tests/e2e/kubetest2-kops/builder"
 	"k8s.io/kops/tests/e2e/pkg/target"
-	"sigs.k8s.io/boskos/client"
 	"sigs.k8s.io/kubetest2/pkg/types"
 )
 
@@ -55,7 +55,6 @@ type deployer struct {
 	Env            []string `flag:"env" desc:"Additional env vars to set for kops commands in NAME=VALUE format"`
 	CreateArgs     string   `flag:"create-args" desc:"Extra space-separated arguments passed to 'kops create cluster'"`
 	KopsBinaryPath string   `flag:"kops-binary-path" desc:"The path to kops executable used for testing"`
-	createBucket   bool     `flag:"-"`
 
 	ControlPlaneIGOverrides []string `flag:"control-plane-instance-group-overrides" desc:"overrides for the control plane instance groups"`
 	NodeIGOverrides         []string `flag:"node-instance-group-overrides" desc:"overrides for the node instance groups"`
@@ -82,13 +81,17 @@ type deployer struct {
 	manifestPath string
 	terraform    *target.Terraform
 
-	// boskos struct field will be non-nil when the deployer is
-	// using boskos to acquire a GCP project
-	boskos *client.Client
+	BoskosResourceType string `flag:"boskos-resource-type" desc:"Resource type to acquire from boskos, for credentials"`
 
-	// this channel serves as a signal channel for the hearbeat goroutine
-	// so that it can be explicitly closed
-	boskosHeartbeatClose chan struct{}
+	boskos boskosHelper
+
+	// awsCredentials holds credentials for AWS loaded from boskos
+	awsCredentials *credentials.Credentials
+
+	// stateStore holds the kops state-store URL
+	stateStore string
+
+	createStateStoreBucket bool `flag:"-"`
 }
 
 // assert that New implements types.NewDeployer
@@ -106,9 +109,8 @@ func (d *deployer) Provider() string {
 func New(opts types.Options) (types.Deployer, *pflag.FlagSet) {
 	// create a deployer object and set fields that are not flag controlled
 	d := &deployer{
-		commonOptions:        opts,
-		BuildOptions:         &builder.BuildOptions{},
-		boskosHeartbeatClose: make(chan struct{}),
+		commonOptions: opts,
+		BuildOptions:  &builder.BuildOptions{},
 	}
 
 	dir, err := defaultArtifactsDir()
diff --git a/tests/e2e/kubetest2-kops/deployer/down.go b/tests/e2e/kubetest2-kops/deployer/down.go
index d35d05d21c4aa..cf58262263f24 100644
--- a/tests/e2e/kubetest2-kops/deployer/down.go
+++ b/tests/e2e/kubetest2-kops/deployer/down.go
@@ -17,16 +17,19 @@ limitations under the License.
 package deployer
 
 import (
+	"context"
 	"fmt"
 	"strings"
 
 	"k8s.io/klog/v2"
+	"k8s.io/kops/tests/e2e/kubetest2-kops/aws"
 	"k8s.io/kops/tests/e2e/kubetest2-kops/gce"
-	"sigs.k8s.io/kubetest2/pkg/boskos"
 	"sigs.k8s.io/kubetest2/pkg/exec"
 )
 
 func (d *deployer) Down() error {
+	ctx := context.TODO()
+
 	if err := d.init(); err != nil {
 		return err
 	}
@@ -54,20 +57,26 @@ func (d *deployer) Down() error {
 		return err
 	}
 
-	if d.CloudProvider == "gce" && d.createBucket {
-		gce.DeleteGCSBucket(d.stateStore(), d.GCPProject)
-	}
+	if d.createStateStoreBucket {
+		switch d.CloudProvider {
+		case "gce":
+			gce.DeleteGCSBucket(d.stateStore, d.GCPProject)
+		case "aws":
+			bucketName, err := aws.AWSBucketName(ctx, d.awsCredentials)
+			if err != nil {
+				return fmt.Errorf("error building aws bucket name: %w", err)
+			}
 
-	if d.boskos != nil {
-		klog.V(2).Info("releasing boskos project")
-		err := boskos.Release(
-			d.boskos,
-			[]string{d.GCPProject},
-			d.boskosHeartbeatClose,
-		)
-		if err != nil {
-			return fmt.Errorf("down failed to release boskos project: %s", err)
+			if err := aws.DeleteAWSBucket(ctx, d.awsCredentials, bucketName); err != nil {
+				klog.Warningf("error deleting AWS bucket: %v", err)
+			}
+		default:
+			return fmt.Errorf("bucket cleanup not implemented for cloud %q", d.CloudProvider)
 		}
 	}
+
+	if err := d.boskos.Cleanup(ctx); err != nil {
+		return err
+	}
 	return nil
 }
diff --git a/tests/e2e/kubetest2-kops/deployer/template.go b/tests/e2e/kubetest2-kops/deployer/template.go
index 1e2453bfed2a6..775771775cabb 100644
--- a/tests/e2e/kubetest2-kops/deployer/template.go
+++ b/tests/e2e/kubetest2-kops/deployer/template.go
@@ -78,7 +78,7 @@ func (d *deployer) templateValues(zones []string, publicIP string) (map[string]i
 		"clusterName":       d.ClusterName,
 		"kubernetesVersion": d.KubernetesVersion,
 		"publicIP":          publicIP,
-		"stateStore":        d.stateStore(),
+		"stateStore":        d.stateStore,
 		"zones":             zones,
 		"sshPublicKey":      string(publicKey),
 	}, nil
diff --git a/tests/e2e/kubetest2-kops/deployer/up.go b/tests/e2e/kubetest2-kops/deployer/up.go
index c8a3eef8b7e0d..120f6d4d0a72f 100644
--- a/tests/e2e/kubetest2-kops/deployer/up.go
+++ b/tests/e2e/kubetest2-kops/deployer/up.go
@@ -47,12 +47,6 @@ func (d *deployer) Up() error {
 		_ = d.Down()
 	}
 
-	if d.CloudProvider == "gce" && d.createBucket {
-		if err := gce.EnsureGCSBucket(d.stateStore(), d.GCPProject); err != nil {
-			return err
-		}
-	}
-
 	adminAccess := d.AdminAccess
 	if adminAccess == "" {
 		publicIP, err := util.ExternalIPRange()