diff --git a/magpie-aws/src/main/java/io/openraven/magpie/plugins/aws/discovery/services/S3Discovery.java b/magpie-aws/src/main/java/io/openraven/magpie/plugins/aws/discovery/services/S3Discovery.java
index 6c57c6ee..a37d3121 100644
--- a/magpie-aws/src/main/java/io/openraven/magpie/plugins/aws/discovery/services/S3Discovery.java
+++ b/magpie-aws/src/main/java/io/openraven/magpie/plugins/aws/discovery/services/S3Discovery.java
@@ -143,7 +143,7 @@ public void discover(ObjectMapper mapper, Session session, Region region, Emitte
     discoverVersioning(client, bucket, data);
     discoverLifeCycleConfiguration(client, bucket, data);
     discoverBucketTags(client, bucket, data, mapper);
-    discoverSize(bucket, data, clientCreator);
+    discoverSize(bucket, data, clientCreator, logger);
     discoverCloudWatchMetricsConfig(client, bucket, data, clientCreator, logger, mapper);
 
     emitter.emit(VersionedMagpieEnvelopeProvider.create(session, List.of(fullService() + ":bucket"), data.toJsonNode()));
@@ -225,7 +225,7 @@ private void discoverPublic(S3Client client, Bucket resource, MagpieAwsResource
       logger.debug("Failure on S3 public access discovery, BucketName: {}, Reason: {}", resource.name(), ex.getMessage());
     }
 
-    // wrap into a try/catch so that if there isn't an policy status response we catch it, default to false, and continue
+    // wrap into a try/catch so that if there isn't a policy status response we catch it, default to false, and continue
     try {
       GetBucketPolicyStatusResponse bucketPolicyStatus = client.getBucketPolicyStatus(GetBucketPolicyStatusRequest.builder()
@@ -334,18 +334,20 @@ private void discoverLogging(S3Client client, Bucket resource, MagpieAwsResource
   private void discoverMetrics(S3Client client, Bucket resource, MagpieAwsResource data) {
     final String keyname = "MetricsConfiguration";
     final String bucketName = resource.name();
-    var result = client.listBucketMetricsConfigurations(ListBucketMetricsConfigurationsRequest.builder().bucket(bucketName).build());
-    result.metricsConfigurationList().forEach(
-      config -> {
-        getAwsResponse(
-          () -> client.getBucketMetricsConfiguration(GetBucketMetricsConfigurationRequest.builder().id(config.id()).bucket(bucketName).build()).metricsConfiguration(),
-          (resp) -> AWSUtils.update(data.supplementaryConfiguration, Map.of(keyname, resp)),
-          (noresp) -> AWSUtils.update(data.supplementaryConfiguration, Map.of(keyname, noresp))
-        );
-      }
+    getAwsResponse(
+      () -> client.listBucketMetricsConfigurations(ListBucketMetricsConfigurationsRequest.builder().bucket(bucketName).build()),
+      (resp) ->
+        resp.metricsConfigurationList().forEach(
+          config -> {
+            getAwsResponse(
+              () -> client.getBucketMetricsConfiguration(GetBucketMetricsConfigurationRequest.builder().id(config.id()).bucket(bucketName).build()).metricsConfiguration(),
+              (r) -> AWSUtils.update(data.supplementaryConfiguration, Map.of(keyname, r)),
+              (noresp) -> AWSUtils.update(data.supplementaryConfiguration, Map.of(keyname, noresp))
+            );
+          }
+        ),
+      (noresp) -> AWSUtils.update(data.supplementaryConfiguration, Map.of(keyname, noresp))
     );
-
-
   }
 
   private void discoverNotifications(S3Client client, Bucket resource, MagpieAwsResource data) {
@@ -460,6 +462,12 @@ public static Map getAllAvailableS3Metrics(String regionID, Stri
         }
       }
       return requestMetrics;
+    } catch (SdkServiceException ex) {
+      if (!(ex.statusCode() == 403 || ex.statusCode() == 404)) {
+        throw ex;
+      }
+      logger.info("Failure on available S3 metrics discovery, BucketName: {}, Reason: {}", bucketName, ex.getMessage());
+      return new HashMap<>();
     }
   }
 
@@ -468,48 +476,54 @@ private JsonNode discoverEnhancedCloudWatchMetrics(MagpieAwsResource data, Magpi
     return mapper.valueToTree(requestMetrics);
   }
 
-  private void discoverSize(Bucket resource, MagpieAwsResource data, MagpieAWSClientCreator clientCreator) {
-
-    // get the different bucket size metrics available
-    List<String> storageTypeDimensions = AWSUtils.getS3AvailableSizeMetrics(data.awsRegion, data.resourceName, clientCreator);
-
-    List<Map<String, Long>> storageTypeMap = new ArrayList<>();
+  private void discoverSize(Bucket resource, MagpieAwsResource data, MagpieAWSClientCreator clientCreator, Logger logger) {
+    try {
+      // get the different bucket size metrics available
+      List<String> storageTypeDimensions = AWSUtils.getS3AvailableSizeMetrics(data.awsRegion, data.resourceName, clientCreator);
+
+      List<Map<String, Long>> storageTypeMap = new ArrayList<>();
+
+      // run through all the available metrics and make cloudwatch calls to get bucket size
+      for (String storageType : storageTypeDimensions) {
+        List<Dimension> dimensions = new ArrayList<>();
+        dimensions.add(Dimension.builder().name("BucketName").value(resource.name()).build());
+        dimensions.add(Dimension.builder().name("StorageType").value(storageType).build());
+        Pair<Long, GetMetricStatisticsResponse> bucketSizeBytes =
+          AWSUtils.getCloudwatchMetricMaximum(data.awsRegion, "AWS/S3", "BucketSizeBytes", dimensions, clientCreator);
+
+        // we are leaving it boxed due to the insertion into the Map below
+        final Long bucketSizeMetric = bucketSizeBytes.getValue0();
+        if (bucketSizeMetric != null) {
+          storageTypeMap.add(Map.of(storageType, bucketSizeMetric));
+        }
+      }
+      data.supplementaryConfiguration = AWSUtils.update(data.supplementaryConfiguration, Map.of("storageTypeSizeInBytes", storageTypeMap));
-    // run through all the available metrics and make cloudwatch calls to get bucket size
-    for (String storageType : storageTypeDimensions) {
       List<Dimension> dimensions = new ArrayList<>();
       dimensions.add(Dimension.builder().name("BucketName").value(resource.name()).build());
-      dimensions.add(Dimension.builder().name("StorageType").value(storageType).build());
+      dimensions.add(Dimension.builder().name("StorageType").value("StandardStorage").build());
       Pair<Long, GetMetricStatisticsResponse> bucketSizeBytes =
         AWSUtils.getCloudwatchMetricMaximum(data.awsRegion, "AWS/S3", "BucketSizeBytes", dimensions, clientCreator);
 
-      // we are leaving it boxed due to the insertion into the Map below
-      final Long bucketSizeMetric = bucketSizeBytes.getValue0();
-      if (bucketSizeMetric != null) {
-        storageTypeMap.add(Map.of(storageType, bucketSizeMetric));
+      List<Dimension> dimensions2 = new ArrayList<>();
+      dimensions2.add(Dimension.builder().name("BucketName").value(resource.name()).build());
+      dimensions2.add(Dimension.builder().name("StorageType").value("AllStorageTypes").build());
+      Pair<Long, GetMetricStatisticsResponse> numberOfObjects =
+        AWSUtils.getCloudwatchMetricMaximum(data.awsRegion, "AWS/S3", "NumberOfObjects", dimensions2, clientCreator);
+
+      if (numberOfObjects.getValue0() != null && bucketSizeBytes.getValue0() != null) {
+        AWSUtils.update(data.supplementaryConfiguration,
+          Map.of("size",
+            Map.of("BucketSizeBytes", bucketSizeBytes.getValue0(),
+              "NumberOfObjects", numberOfObjects.getValue0())));
+
+        data.sizeInBytes = bucketSizeBytes.getValue0();
-      }
-    }
-    data.supplementaryConfiguration = AWSUtils.update(data.supplementaryConfiguration, Map.of("storageTypeSizeInBytes", storageTypeMap));
-
-    List<Dimension> dimensions = new ArrayList<>();
-    dimensions.add(Dimension.builder().name("BucketName").value(resource.name()).build());
-    dimensions.add(Dimension.builder().name("StorageType").value("StandardStorage").build());
-    Pair<Long, GetMetricStatisticsResponse> bucketSizeBytes =
-      AWSUtils.getCloudwatchMetricMaximum(data.awsRegion, "AWS/S3", "BucketSizeBytes", dimensions, clientCreator);
-
-    List<Dimension> dimensions2 = new ArrayList<>();
-    dimensions2.add(Dimension.builder().name("BucketName").value(resource.name()).build());
-    dimensions2.add(Dimension.builder().name("StorageType").value("AllStorageTypes").build());
-    Pair<Long, GetMetricStatisticsResponse> numberOfObjects =
-      AWSUtils.getCloudwatchMetricMaximum(data.awsRegion, "AWS/S3", "NumberOfObjects", dimensions2, clientCreator);
-
-    if (numberOfObjects.getValue0() != null && bucketSizeBytes.getValue0() != null) {
-      AWSUtils.update(data.supplementaryConfiguration,
-        Map.of("size",
-          Map.of("BucketSizeBytes", bucketSizeBytes.getValue0(),
-            "NumberOfObjects", numberOfObjects.getValue0())));
-
-      data.sizeInBytes = bucketSizeBytes.getValue0();
+      }
+    } catch (SdkServiceException ex) {
+      if (!(ex.statusCode() == 403 || ex.statusCode() == 404)) {
+        throw ex;
+      }
+      logger.info("Failure on S3 bucket size discovery, BucketName: {}, Reason: {}", resource.name(), ex.getMessage());
     }
   }
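
Note: the error handling added in getAllAvailableS3Metrics and discoverSize follows one pattern: a 403 (access denied) or 404 (missing configuration) returned by the service is treated as non-fatal, logged at info level, and discovery continues with an empty result, while any other service error is rethrown. A minimal standalone sketch of that pattern is below; it assumes AWS SDK v2's SdkServiceException and an SLF4J Logger, and the class and helper names are illustrative only, not part of this change.

import java.util.function.Supplier;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import software.amazon.awssdk.core.exception.SdkServiceException;

final class NonFatalS3Errors {
  private static final Logger LOGGER = LoggerFactory.getLogger(NonFatalS3Errors.class);

  private NonFatalS3Errors() {
  }

  // Illustrative helper (not code from S3Discovery): run the call, treat 403/404 service errors
  // as non-fatal by logging and returning a fallback value, and rethrow everything else.
  static <T> T withNonFatalS3Errors(Supplier<T> call, T fallback, String bucketName) {
    try {
      return call.get();
    } catch (SdkServiceException ex) {
      if (!(ex.statusCode() == 403 || ex.statusCode() == 404)) {
        throw ex;
      }
      LOGGER.info("Non-fatal S3 discovery failure, BucketName: {}, Reason: {}", bucketName, ex.getMessage());
      return fallback;
    }
  }
}

Centralizing the status-code check like this keeps the 403/404 allowance identical across the metrics and bucket-size discovery paths, so a bucket the scanner cannot fully read degrades to partial data instead of failing the whole discovery run.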