From 168293937cc7f438a3ec2dee46805aa8e12089c4 Mon Sep 17 00:00:00 2001
From: Mattie Fu
Date: Fri, 12 Apr 2024 10:09:43 -0400
Subject: [PATCH] feat: migrate to OTEL and enable metrics by default (#2166)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* feat: migrate to OTEL exporter (#1788)
* feat: migrate exporter to OTEL
* address comments
* filter out only bigtable metrics
* fix test
* use the bom
* update
* update
* update completeResultCode
* add a comment
* address comments
* address comments
* update pom
* small fix
* also check timestamp
* address comment
* updates
* update
* do not block on shutdown
* chore: refactor factory class (#2081)
* chore: refactor TracerFactory creation (#2102)
* feat: migrate built in metrics to OTEL (#1796)
* feat: migrate exporter to OTEL
* address comments
* filter out only bigtable metrics
* fix test
* use the bom
* update
* update
* feat: migrate builtin metrics to OTEL
* update completeResultCode
* add a comment
* update
* fix tests
* remove unrelated changes
* fix tests
* add documentation
* fix test
* merge exporter changes
* address comments
* rebase on otel
* revert changes in stats
* fix import
* update
* merge back the endpoint change
* refactor constants and settings
* refactor and fix tests
* remove unused dependency
* add some javadoc
* address part of the comments
* update test
* test with nano
* measure everything in nanos and publish with double histogram
* address comments
* fix test
* add toString
* merge from main
* feat: migrate per connection error count metric to otel (#2133)
* feat: migrate per connection error count metric to otel
* update test
* address comments
* remove unnecessary check
* clean up statsRecorder
* remove dependency
* address comments
* feat: migrate per connection error exporter to otel (#2152)
* chore: clean up stats package (#2163)
* 🦉 Updates from OwlBot post-processor
  See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md
* fix clirr and integration test
* fix clirr and integration test
* 🦉 Updates from OwlBot post-processor
  See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md
* log more information on test failures
* address comments
* 🦉 Updates from OwlBot post-processor
  See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md
* include version
* fix flaky test
* remove otel dependencies since they're added to shared dependencies
* backport new tests since 2.37
* set feature flag when metrics is enabled
* record metrics whenever it's registered
* record metrics whenever it's registered
* 🦉 Updates from OwlBot post-processor
  See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md
* 🦉 Updates from OwlBot post-processor
  See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md
* remove stale reference
* clean up

---------

Co-authored-by: Owl Bot
---
 .../clirr-ignored-differences.xml | 42 --
 google-cloud-bigtable-stats/pom.xml | 269 -------
 .../BigtableCreateTimeSeriesExporter.java | 91 ---
 .../stats/BigtableStackdriverExportUtils.java | 367 ----------
 .../BigtableStackdriverStatsExporter.java | 102 ---
 .../stats/BuiltinMeasureConstants.java | 97 ---
 .../bigtable/stats/BuiltinViewConstants.java | 221 ------
 .../cloud/bigtable/stats/BuiltinViews.java | 64 --
 .../stats/ConsumerEnvironmentUtils.java | 57 --
 .../bigtable/stats/StatsRecorderWrapper.java | 135 ----
 .../StatsRecorderWrapperForConnection.java | 57 --
 .../cloud/bigtable/stats/StatsWrapper.java | 74 --
 .../META-INF/license/apache2-LICENSE.txt | 201 ------
 .../BigtableCreateTimeSeriesExporterTest.java | 321 ---------
 .../stats/ITBuiltinViewConstantsTest.java | 38 -
 .../stats/StatsRecorderWrapperTest.java | 513 --------------
 .../clirr-ignored-differences.xml | 11 +
 google-cloud-bigtable/pom.xml | 65 +-
 .../data/v2/BigtableDataClientFactory.java | 25 +-
 .../data/v2/BigtableDataSettings.java | 72 +-
 .../data/v2/stub/EnhancedBigtableStub.java | 119 +++-
 .../v2/stub/EnhancedBigtableStubSettings.java | 45 ++
 .../RateLimitingServerStreamingCallable.java | 2 +-
 .../BigtableCloudMonitoringExporter.java | 356 ++++++++++
 .../stub/metrics/BigtableExporterUtils.java | 347 +++++++++
 .../metrics/BigtableGrpcStreamTracer.java | 2 +-
 .../stub/metrics/BuiltinMetricsConstants.java | 220 ++++++
 .../v2/stub/metrics/BuiltinMetricsTracer.java | 144 +++-
 .../metrics/BuiltinMetricsTracerFactory.java | 109 ++-
 .../v2/stub/metrics/BuiltinMetricsView.java | 59 ++
 .../CustomOpenTelemetryMetricsProvider.java | 70 ++
 .../stub/metrics/DefaultMetricsProvider.java | 63 ++
 .../ErrorCountPerConnectionMetricTracker.java | 41 +-
 .../data/v2/stub/metrics/MetricsProvider.java | 25 +
 .../v2/stub/metrics/NoopMetricsProvider.java | 36 +
 .../metrics/TracedBatcherUnaryCallable.java | 4 +-
 .../v2/BigtableDataClientFactoryTest.java | 12 +-
 .../bigtable/data/v2/it/BuiltinMetricsIT.java | 333 +++++++--
 .../bigtable/data/v2/it/MetricsITUtils.java | 37 +
 .../v2/it/StreamingMetricsMetadataIT.java | 123 +++-
 .../data/v2/it/UnaryMetricsMetadataIT.java | 138 +++-
 .../EnhancedBigtableStubSettingsTest.java | 1 +
 .../BigtableCloudMonitoringExporterTest.java | 310 ++++++++
 .../metrics/BigtableTracerCallableTest.java | 31 +-
 .../stub/metrics/BuiltinMetricsTestUtils.java | 112 +++
 .../metrics/BuiltinMetricsTracerTest.java | 663 ++++++++++--------
 .../metrics/ErrorCountPerConnectionTest.java | 164 +++--
 .../v2/stub/metrics/MetricsTracerTest.java | 16 +-
 pom.xml | 16 -
 49 files changed, 3147 insertions(+), 3273 deletions(-)
 delete mode 100644 google-cloud-bigtable-stats/clirr-ignored-differences.xml
 delete mode 100644 google-cloud-bigtable-stats/pom.xml
 delete mode 100644 google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporter.java
 delete mode 100644 google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverExportUtils.java
 delete mode 100644 google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverStatsExporter.java
 delete mode 100644 google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinMeasureConstants.java
 delete mode 100644 google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViewConstants.java
 delete mode 100644 google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViews.java
 delete mode 100644 google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils.java
 delete mode 100644 google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapper.java
 delete mode 100644 google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperForConnection.java
 delete mode 100644 google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsWrapper.java
 delete mode 100644
google-cloud-bigtable-stats/src/main/resources/META-INF/license/apache2-LICENSE.txt delete mode 100644 google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporterTest.java delete mode 100644 google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/ITBuiltinViewConstantsTest.java delete mode 100644 google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperTest.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java create mode 100644 google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java create mode 100644 google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java diff --git a/google-cloud-bigtable-stats/clirr-ignored-differences.xml b/google-cloud-bigtable-stats/clirr-ignored-differences.xml deleted file mode 100644 index aa9be424a8..0000000000 --- a/google-cloud-bigtable-stats/clirr-ignored-differences.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - 7005 - com/google/cloud/bigtable/stats/StatsRecorderWrapper - *StatsRecorderWrapper* - *StatsRecorder* - - - - 7002 - com/google/cloud/bigtable/stats/StatsRecorderWrapper - void record(java.lang.String, java.lang.String, java.lang.String, java.lang.String) - - - - 7002 - com/google/cloud/bigtable/stats/StatsRecorderWrapper - void putBatchRequestThrottled(long) - - - - 7005 - com/google/cloud/bigtable/stats/StatsRecorderWrapperForConnection - *StatsRecorderWrapperForConnection* - * - - - - 7002 - com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils$ResourceUtilsWrapper - * - - - - 7006 - com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils$ResourceUtilsWrapper - * - * - - diff --git a/google-cloud-bigtable-stats/pom.xml b/google-cloud-bigtable-stats/pom.xml deleted file mode 100644 index 92d3e9fa8e..0000000000 --- a/google-cloud-bigtable-stats/pom.xml +++ /dev/null @@ -1,269 +0,0 @@ - - - - com.google.cloud - google-cloud-bigtable-parent - 2.37.1-SNAPSHOT - - 4.0.0 - - - google-cloud-bigtable-stats - 2.37.1-SNAPSHOT - Experimental project to shade OpenCensus dependencies. 
- - - - - com.google.cloud - google-cloud-bigtable-deps-bom - 2.37.1-SNAPSHOT - pom - import - - - - - - - - io.opencensus - opencensus-api - - - io.opencensus - opencensus-exporter-stats-stackdriver - - - io.opencensus - opencensus-contrib-resource-util - - - io.opencensus - opencensus-impl - runtime - - - - - com.google.cloud - google-cloud-monitoring - - - - com.google.http-client - google-http-client-gson - - - com.google.http-client - google-http-client - - - - io.perfmark - perfmark-api - - - - - com.google.api.grpc - proto-google-cloud-monitoring-v3 - - - com.google.api.grpc - proto-google-common-protos - - - com.google.auth - google-auth-library-credentials - - - com.google.api - gax - - - - com.google.http-client - google-http-client - - - com.google.http-client - google-http-client-gson - - - - - com.google.api - api-common - - - com.google.api - gax-grpc - - - com.google.protobuf - protobuf-java - - - com.google.guava - guava - - - org.threeten - threetenbp - - - com.google.code.findbugs - jsr305 - - - - - com.google.http-client - google-http-client - runtime - - - - - com.google.truth - truth - test - - - junit - junit - test - - - org.mockito - mockito-core - test - - - - - - - org.apache.maven.plugins - maven-shade-plugin - 3.2.4 - - - package - - shade - - - false - true - - - - io.opencensus:* - - - - - io.opencensus - - com.google.bigtable.veneer.repackaged.io.opencensus - - - - - - - - - - - - - - org.apache.maven.plugins - maven-dependency-plugin - 3.6.1 - - - - - - - - io.opencensus:opencensus-exporter-metrics-util:* - io.opencensus:opencensus-exporter-stats-stackdriver:* - - - - - org.codehaus.mojo - clirr-maven-plugin - - - com/google/bigtable/veneer/repackaged/** - - - - - org.apache.maven.plugins - maven-enforcer-plugin - - - enforce-version-consistency - - enforce - - - - - - - - - io.opencensus:*:[0.31.1] - io.opencensus:opencensus-proto:[0.2.0] - - - - - - - - - org.codehaus.mojo - license-maven-plugin - 2.4.0 - - - default-cli - generate-resources - - add-third-party - - - test - - io.opencensus:* - true - - - - - - - - - test - - - - - - diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporter.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporter.java deleted file mode 100644 index d8936b0e0e..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporter.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigtable.stats; - -import com.google.api.MonitoredResource; -import com.google.cloud.monitoring.v3.MetricServiceClient; -import com.google.monitoring.v3.CreateTimeSeriesRequest; -import com.google.monitoring.v3.ProjectName; -import io.opencensus.exporter.metrics.util.MetricExporter; -import io.opencensus.metrics.export.Metric; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.logging.Level; -import java.util.logging.Logger; -import java.util.stream.Collectors; - -final class BigtableCreateTimeSeriesExporter extends MetricExporter { - private static final Logger logger = - Logger.getLogger(BigtableCreateTimeSeriesExporter.class.getName()); - private final MetricServiceClient metricServiceClient; - private final MonitoredResource gceOrGkeMonitoredResource; - private final String clientId; - - BigtableCreateTimeSeriesExporter( - MetricServiceClient metricServiceClient, MonitoredResource gceOrGkeMonitoredResource) { - this.metricServiceClient = metricServiceClient; - this.clientId = BigtableStackdriverExportUtils.getDefaultTaskValue(); - this.gceOrGkeMonitoredResource = gceOrGkeMonitoredResource; - } - - public void export(Collection metrics) { - Map> projectToTimeSeries = new HashMap<>(); - - for (Metric metric : metrics) { - // only export bigtable metrics - if (!BigtableStackdriverExportUtils.shouldExportMetric(metric.getMetricDescriptor())) { - continue; - } - - projectToTimeSeries = - metric.getTimeSeriesList().stream() - .collect( - Collectors.groupingBy( - timeSeries -> - BigtableStackdriverExportUtils.getProjectId( - metric.getMetricDescriptor(), timeSeries, gceOrGkeMonitoredResource), - Collectors.mapping( - timeSeries -> - BigtableStackdriverExportUtils.convertTimeSeries( - metric.getMetricDescriptor(), - timeSeries, - clientId, - gceOrGkeMonitoredResource), - Collectors.toList()))); - - for (Map.Entry> entry : - projectToTimeSeries.entrySet()) { - ProjectName projectName = ProjectName.of(entry.getKey()); - CreateTimeSeriesRequest request = - CreateTimeSeriesRequest.newBuilder() - .setName(projectName.toString()) - .addAllTimeSeries(entry.getValue()) - .build(); - try { - this.metricServiceClient.createServiceTimeSeries(request); - } catch (Throwable e) { - logger.log( - Level.WARNING, - "Exception thrown when exporting TimeSeries for projectName=" - + projectName.getProject(), - e); - } - } - } - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverExportUtils.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverExportUtils.java deleted file mode 100644 index cc70fbb435..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverExportUtils.java +++ /dev/null @@ -1,367 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigtable.stats; - -import static com.google.cloud.bigtable.stats.BuiltinViewConstants.PER_CONNECTION_ERROR_COUNT_VIEW; - -import com.google.api.Distribution.BucketOptions; -import com.google.api.Distribution.BucketOptions.Explicit; -import com.google.api.Metric; -import com.google.api.MetricDescriptor.MetricKind; -import com.google.api.MonitoredResource; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Maps; -import com.google.monitoring.v3.TimeInterval; -import com.google.monitoring.v3.TypedValue; -import io.opencensus.common.Function; -import io.opencensus.common.Functions; -import io.opencensus.common.Timestamp; -import io.opencensus.metrics.LabelKey; -import io.opencensus.metrics.LabelValue; -import io.opencensus.metrics.export.Distribution; -import io.opencensus.metrics.export.Distribution.Bucket; -import io.opencensus.metrics.export.Distribution.BucketOptions.ExplicitOptions; -import io.opencensus.metrics.export.MetricDescriptor; -import io.opencensus.metrics.export.MetricDescriptor.Type; -import io.opencensus.metrics.export.Point; -import io.opencensus.metrics.export.Summary; -import io.opencensus.metrics.export.TimeSeries; -import io.opencensus.metrics.export.Value; -import java.lang.management.ManagementFactory; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.security.SecureRandom; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.annotation.Nullable; - -class BigtableStackdriverExportUtils { - private static final String BIGTABLE_RESOURCE_TYPE = "bigtable_client_raw"; - - @VisibleForTesting static final String GCE_RESOURCE_TYPE = "gce_instance"; - @VisibleForTesting static final String GKE_RESOURCE_TYPE = "k8s_container"; - @VisibleForTesting static final String GCE_OR_GKE_PROJECT_ID_KEY = "project_id"; - private static final Logger logger = - Logger.getLogger(BigtableStackdriverExportUtils.class.getName()); - - private static final Function typedValueDoubleFunction = - arg -> { - TypedValue.Builder builder = TypedValue.newBuilder(); - builder.setDoubleValue(arg); - return builder.build(); - }; - private static final Function typedValueLongFunction = - arg -> { - TypedValue.Builder builder = TypedValue.newBuilder(); - builder.setInt64Value(arg); - return builder.build(); - }; - private static final Function typedValueDistributionFunction = - arg -> { - TypedValue.Builder builder = TypedValue.newBuilder(); - return builder - .setDistributionValue(BigtableStackdriverExportUtils.createDistribution(arg)) - .build(); - }; - private static final Function typedValueSummaryFunction = - arg -> { - TypedValue.Builder builder = TypedValue.newBuilder(); - return builder.build(); - }; - private static final Function bucketOptionsExplicitFunction = - arg -> { - BucketOptions.Builder builder = BucketOptions.newBuilder(); - Explicit.Builder explicitBuilder = Explicit.newBuilder(); - explicitBuilder.addBounds(0.0D); - explicitBuilder.addAllBounds(arg.getBucketBoundaries()); - builder.setExplicitBuckets(explicitBuilder.build()); - return builder.build(); - }; - - // promote the following metric labels to Bigtable monitored resource labels - private static final Set PROMOTED_BIGTABLE_RESOURCE_LABELS = - ImmutableSet.of( - BuiltinMeasureConstants.PROJECT_ID.getName(), - 
BuiltinMeasureConstants.INSTANCE_ID.getName(), - BuiltinMeasureConstants.CLUSTER.getName(), - BuiltinMeasureConstants.ZONE.getName(), - BuiltinMeasureConstants.TABLE.getName()); - - private static final LabelKey CLIENT_UID_LABEL_KEY = - LabelKey.create(BuiltinMeasureConstants.CLIENT_UID.getName(), "client uid"); - - static boolean isBigtableTableMetric(MetricDescriptor metricDescriptor) { - return metricDescriptor.getName().contains("bigtable") - && !metricDescriptor.getName().equals(PER_CONNECTION_ERROR_COUNT_VIEW.getName().asString()); - } - - static boolean shouldExportMetric(MetricDescriptor metricDescriptor) { - return isBigtableTableMetric(metricDescriptor) - || (metricDescriptor.getName().equals(PER_CONNECTION_ERROR_COUNT_VIEW.getName().asString()) - && (ConsumerEnvironmentUtils.isEnvGce() || ConsumerEnvironmentUtils.isEnvGke())); - } - - static com.google.monitoring.v3.TimeSeries convertTimeSeries( - MetricDescriptor metricDescriptor, - TimeSeries timeSeries, - String clientId, - MonitoredResource gceOrGkeMonitoredResource) { - Type metricType = metricDescriptor.getType(); - - com.google.monitoring.v3.TimeSeries.Builder builder; - if (isBigtableTableMetric(metricDescriptor)) { - builder = - setupBuilderForBigtableResource( - metricDescriptor, - MonitoredResource.newBuilder().setType(BIGTABLE_RESOURCE_TYPE), - timeSeries, - clientId); - } else if (ConsumerEnvironmentUtils.isEnvGce() || ConsumerEnvironmentUtils.isEnvGke()) { - builder = - setupBuilderForGceOrGKEResource( - metricDescriptor, gceOrGkeMonitoredResource, timeSeries, clientId); - } else { - logger.warning( - "Trying to export metric " - + metricDescriptor.getName() - + " in a non-GCE/GKE environment."); - return com.google.monitoring.v3.TimeSeries.newBuilder().build(); - } - builder.setMetricKind(createMetricKind(metricType)); - builder.setValueType(createValueType(metricType)); - Timestamp startTimeStamp = timeSeries.getStartTimestamp(); - for (Point point : timeSeries.getPoints()) { - builder.addPoints(createPoint(point, startTimeStamp)); - } - return builder.build(); - } - - private static com.google.monitoring.v3.TimeSeries.Builder setupBuilderForBigtableResource( - MetricDescriptor metricDescriptor, - MonitoredResource.Builder monitoredResourceBuilder, - TimeSeries timeSeries, - String clientId) { - List labelKeys = metricDescriptor.getLabelKeys(); - String metricName = metricDescriptor.getName(); - List metricTagKeys = new ArrayList<>(); - List metricTagValues = new ArrayList<>(); - - List labelValues = timeSeries.getLabelValues(); - for (int i = 0; i < labelValues.size(); i++) { - // If the label is defined in the monitored resource, convert it to - // a monitored resource label. Otherwise, keep it as a metric label. 
- if (PROMOTED_BIGTABLE_RESOURCE_LABELS.contains(labelKeys.get(i).getKey())) { - monitoredResourceBuilder.putLabels( - labelKeys.get(i).getKey(), labelValues.get(i).getValue()); - } else { - metricTagKeys.add(labelKeys.get(i)); - metricTagValues.add(labelValues.get(i)); - } - } - metricTagKeys.add(CLIENT_UID_LABEL_KEY); - metricTagValues.add(LabelValue.create(clientId)); - - com.google.monitoring.v3.TimeSeries.Builder builder = - com.google.monitoring.v3.TimeSeries.newBuilder(); - builder.setResource(monitoredResourceBuilder.build()); - builder.setMetric(createMetric(metricName, metricTagKeys, metricTagValues)); - - return builder; - } - - private static com.google.monitoring.v3.TimeSeries.Builder setupBuilderForGceOrGKEResource( - MetricDescriptor metricDescriptor, - MonitoredResource gceOrGkeMonitoredResource, - TimeSeries timeSeries, - String clientId) { - List labelKeys = metricDescriptor.getLabelKeys(); - String metricName = metricDescriptor.getName(); - List metricTagKeys = new ArrayList<>(); - List metricTagValues = new ArrayList<>(); - - List labelValues = timeSeries.getLabelValues(); - for (int i = 0; i < labelValues.size(); i++) { - metricTagKeys.add(labelKeys.get(i)); - metricTagValues.add(labelValues.get(i)); - } - metricTagKeys.add(CLIENT_UID_LABEL_KEY); - metricTagValues.add(LabelValue.create(clientId)); - - com.google.monitoring.v3.TimeSeries.Builder builder = - com.google.monitoring.v3.TimeSeries.newBuilder(); - builder.setResource(gceOrGkeMonitoredResource); - builder.setMetric(createMetric(metricName, metricTagKeys, metricTagValues)); - - return builder; - } - - static String getProjectId( - MetricDescriptor metricDescriptor, - TimeSeries timeSeries, - MonitoredResource gceOrGkeMonitoredResource) { - if (isBigtableTableMetric(metricDescriptor)) { - return getProjectIdForBigtableTableResource(metricDescriptor, timeSeries); - } else { - return getProjectIdForGceOrGkeResource(gceOrGkeMonitoredResource); - } - } - - static String getProjectIdForBigtableTableResource( - MetricDescriptor metricDescriptor, TimeSeries timeSeries) { - List labelKeys = metricDescriptor.getLabelKeys(); - List labelValues = timeSeries.getLabelValues(); - for (int i = 0; i < labelKeys.size(); i++) { - if (labelKeys.get(i).getKey().equals(BuiltinMeasureConstants.PROJECT_ID.getName())) { - return labelValues.get(i).getValue(); - } - } - throw new IllegalStateException("Can't find project id for the current timeseries"); - } - - static String getProjectIdForGceOrGkeResource(MonitoredResource gceOrGkeMonitoredResource) { - if (!gceOrGkeMonitoredResource.getType().equals(GCE_RESOURCE_TYPE) - && !gceOrGkeMonitoredResource.getType().equals(GKE_RESOURCE_TYPE)) { - throw new IllegalStateException( - "Expected GCE or GKE resource type, but found " + gceOrGkeMonitoredResource); - } - return gceOrGkeMonitoredResource.getLabelsOrThrow(GCE_OR_GKE_PROJECT_ID_KEY); - } - - static String getDefaultTaskValue() { - // Something like '@' - final String jvmName = ManagementFactory.getRuntimeMXBean().getName(); - // If not the expected format then generate a random number. - if (jvmName.indexOf('@') < 1) { - String hostname = "localhost"; - try { - hostname = InetAddress.getLocalHost().getHostName(); - } catch (UnknownHostException e) { - logger.log(Level.INFO, "Unable to get the hostname.", e); - } - // Generate a random number and use the same format "random_number@hostname". 
- return "java-" + new SecureRandom().nextInt() + "@" + hostname; - } - return "java-" + UUID.randomUUID() + jvmName; - } - - private static MetricKind createMetricKind(Type type) { - switch (type) { - case CUMULATIVE_DOUBLE: - case CUMULATIVE_INT64: - case CUMULATIVE_DISTRIBUTION: - return MetricKind.CUMULATIVE; - default: - return MetricKind.UNRECOGNIZED; - } - } - - private static com.google.api.MetricDescriptor.ValueType createValueType(Type type) { - switch (type) { - case CUMULATIVE_DOUBLE: - return com.google.api.MetricDescriptor.ValueType.DOUBLE; - case CUMULATIVE_INT64: - return com.google.api.MetricDescriptor.ValueType.INT64; - case CUMULATIVE_DISTRIBUTION: - return com.google.api.MetricDescriptor.ValueType.DISTRIBUTION; - default: - return com.google.api.MetricDescriptor.ValueType.UNRECOGNIZED; - } - } - - private static Metric createMetric( - String metricName, List labelKeys, List labelValues) { - Metric.Builder builder = Metric.newBuilder(); - builder.setType(metricName); - Map stringTagMap = Maps.newHashMap(); - - for (int i = 0; i < labelValues.size(); ++i) { - String value = labelValues.get(i).getValue(); - if (value != null) { - stringTagMap.put(labelKeys.get(i).getKey(), value); - } - } - - builder.putAllLabels(stringTagMap); - return builder.build(); - } - - private static com.google.monitoring.v3.Point createPoint(Point point, Timestamp startTimestamp) { - com.google.monitoring.v3.TimeInterval.Builder timeIntervalBuilder = TimeInterval.newBuilder(); - timeIntervalBuilder.setStartTime(convertTimestamp(startTimestamp)); - timeIntervalBuilder.setEndTime(convertTimestamp(point.getTimestamp())); - - com.google.monitoring.v3.Point.Builder builder = com.google.monitoring.v3.Point.newBuilder(); - builder.setInterval(timeIntervalBuilder.build()); - builder.setValue(createTypedValue(point.getValue())); - return builder.build(); - } - - private static TypedValue createTypedValue(Value value) { - return value.match( - typedValueDoubleFunction, - typedValueLongFunction, - typedValueDistributionFunction, - typedValueSummaryFunction, - Functions.throwIllegalArgumentException()); - } - - private static com.google.api.Distribution createDistribution(Distribution distribution) { - com.google.api.Distribution.Builder builder = - com.google.api.Distribution.newBuilder() - .setBucketOptions(createBucketOptions(distribution.getBucketOptions())) - .setCount(distribution.getCount()) - .setMean( - distribution.getCount() == 0L - ? 0.0D - : distribution.getSum() / (double) distribution.getCount()) - .setSumOfSquaredDeviation(distribution.getSumOfSquaredDeviations()); - setBucketCounts(distribution.getBuckets(), builder); - return builder.build(); - } - - private static BucketOptions createBucketOptions( - @Nullable Distribution.BucketOptions bucketOptions) { - com.google.api.Distribution.BucketOptions.Builder builder = BucketOptions.newBuilder(); - return bucketOptions == null - ? builder.build() - : bucketOptions.match( - bucketOptionsExplicitFunction, Functions.throwIllegalArgumentException()); - } - - private static void setBucketCounts( - List buckets, com.google.api.Distribution.Builder builder) { - builder.addBucketCounts(0L); - - for (Bucket bucket : buckets) { - builder.addBucketCounts(bucket.getCount()); - } - } - - private static com.google.protobuf.Timestamp convertTimestamp(Timestamp censusTimestamp) { - return censusTimestamp.getSeconds() < 0L - ? 
com.google.protobuf.Timestamp.newBuilder().build() - : com.google.protobuf.Timestamp.newBuilder() - .setSeconds(censusTimestamp.getSeconds()) - .setNanos(censusTimestamp.getNanos()) - .build(); - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverStatsExporter.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverStatsExporter.java deleted file mode 100644 index 856353cfd0..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverStatsExporter.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import com.google.api.MonitoredResource; -import com.google.api.core.InternalApi; -import com.google.api.gax.core.FixedCredentialsProvider; -import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; -import com.google.auth.Credentials; -import com.google.cloud.monitoring.v3.MetricServiceClient; -import com.google.cloud.monitoring.v3.MetricServiceSettings; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.MoreObjects; -import com.google.common.base.Preconditions; -import io.opencensus.common.Duration; -import io.opencensus.exporter.metrics.util.IntervalMetricReader; -import io.opencensus.exporter.metrics.util.MetricReader; -import io.opencensus.exporter.stats.stackdriver.StackdriverStatsConfiguration; -import io.opencensus.metrics.Metrics; -import java.io.IOException; -import javax.annotation.Nullable; -import javax.annotation.concurrent.GuardedBy; - -@InternalApi -public class BigtableStackdriverStatsExporter { - static final Object lock = new Object(); - - @Nullable - @GuardedBy("lock") - private static BigtableStackdriverStatsExporter instance = null; - - // Default export interval is 1 minute - private static final Duration EXPORT_INTERVAL = Duration.create(60, 0); - - private static final String MONITORING_ENDPOINT = - MoreObjects.firstNonNull( - System.getProperty("bigtable.test-monitoring-endpoint"), - MetricServiceSettings.getDefaultEndpoint()); - - private final IntervalMetricReader intervalMetricReader; - - private BigtableStackdriverStatsExporter( - MetricServiceClient metricServiceClient, - Duration exportInterval, - MonitoredResource gceOrGkeMonitoredResource) { - IntervalMetricReader.Options.Builder intervalMetricReaderOptionsBuilder = - IntervalMetricReader.Options.builder(); - intervalMetricReaderOptionsBuilder.setExportInterval(exportInterval); - this.intervalMetricReader = - IntervalMetricReader.create( - new BigtableCreateTimeSeriesExporter(metricServiceClient, gceOrGkeMonitoredResource), - MetricReader.create( - MetricReader.Options.builder() - .setMetricProducerManager( - Metrics.getExportComponent().getMetricProducerManager()) - .build()), - intervalMetricReaderOptionsBuilder.build()); - } - - public static void register(Credentials credentials) throws 
IOException { - synchronized (lock) { - Preconditions.checkState( - instance == null, "Bigtable Stackdriver stats exporter is already created"); - // Default timeout for creating a client is 1 minute - MetricServiceClient client = createMetricServiceClient(credentials, Duration.create(60L, 0)); - MonitoredResource gceOrGkeMonitoredResource = null; - if (ConsumerEnvironmentUtils.isEnvGce() || ConsumerEnvironmentUtils.isEnvGke()) { - gceOrGkeMonitoredResource = - StackdriverStatsConfiguration.builder().build().getMonitoredResource(); - } - instance = - new BigtableStackdriverStatsExporter(client, EXPORT_INTERVAL, gceOrGkeMonitoredResource); - } - } - - @GuardedBy("lock") - @VisibleForTesting - static MetricServiceClient createMetricServiceClient(Credentials credentials, Duration deadline) - throws IOException { - MetricServiceSettings.Builder settingsBuilder = - MetricServiceSettings.newBuilder() - .setTransportChannelProvider(InstantiatingGrpcChannelProvider.newBuilder().build()); - settingsBuilder.setCredentialsProvider(FixedCredentialsProvider.create(credentials)); - settingsBuilder.setEndpoint(MONITORING_ENDPOINT); - org.threeten.bp.Duration timeout = org.threeten.bp.Duration.ofMillis(deadline.toMillis()); - settingsBuilder.createServiceTimeSeriesSettings().setSimpleTimeoutNoRetries(timeout); - return MetricServiceClient.create(settingsBuilder.build()); - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinMeasureConstants.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinMeasureConstants.java deleted file mode 100644 index 59e7511d41..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinMeasureConstants.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigtable.stats; - -import static io.opencensus.stats.Measure.MeasureLong; - -import io.opencensus.tags.TagKey; - -/** Built-in metrics that will be readable under bigtable.googleapis.com/client namespace */ -class BuiltinMeasureConstants { - // Monitored resource TagKeys - static final TagKey PROJECT_ID = TagKey.create("project_id"); - static final TagKey INSTANCE_ID = TagKey.create("instance"); - static final TagKey CLUSTER = TagKey.create("cluster"); - static final TagKey TABLE = TagKey.create("table"); - static final TagKey ZONE = TagKey.create("zone"); - static final TagKey CLIENT_UID = TagKey.create("client_uid"); - - // Metrics TagKeys - static final TagKey APP_PROFILE = TagKey.create("app_profile"); - static final TagKey METHOD = TagKey.create("method"); - static final TagKey STREAMING = TagKey.create("streaming"); - static final TagKey STATUS = TagKey.create("status"); - static final TagKey CLIENT_NAME = TagKey.create("client_name"); - - // Units - private static final String COUNT = "1"; - private static final String MILLISECOND = "ms"; - - // Measurements - static final MeasureLong OPERATION_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/operation_latencies", - "Total time until final operation success or failure, including retries and backoff.", - MILLISECOND); - - static final MeasureLong ATTEMPT_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/attempt_latencies", - "Client observed latency per RPC attempt.", - MILLISECOND); - - static final MeasureLong RETRY_COUNT = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/retry_count", - "The number of additional RPCs sent after the initial attempt.", - COUNT); - - static final MeasureLong FIRST_RESPONSE_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/first_response_latencies", - "Latency from operation start until the response headers were received. The publishing of the measurement will be delayed until the attempt response has been received.", - MILLISECOND); - - static final MeasureLong SERVER_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/server_latencies", - "The latency measured from the moment that the RPC entered the Google data center until the RPC was completed.", - MILLISECOND); - - static final MeasureLong CONNECTIVITY_ERROR_COUNT = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/connectivity_error_count", - "Number of requests that failed to reach the Google datacenter. (Requests without google response headers).", - COUNT); - - static final MeasureLong APPLICATION_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/application_latencies", - "The latency of the client application consuming available response data.", - MILLISECOND); - - static final MeasureLong THROTTLING_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/throttling_latencies", - "The artificial latency introduced by the client to limit the number of outstanding requests. 
The publishing of the measurement will be delayed until the attempt trailers have been received.", - MILLISECOND); - - static final MeasureLong PER_CONNECTION_ERROR_COUNT = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/per_connection_error_count", - "Distribution of counts of channels per 'error count per minute'.", - COUNT); -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViewConstants.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViewConstants.java deleted file mode 100644 index 82ce61e2d3..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViewConstants.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.APPLICATION_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.APP_PROFILE; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.ATTEMPT_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.CLIENT_NAME; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.CLUSTER; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.CONNECTIVITY_ERROR_COUNT; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.FIRST_RESPONSE_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.INSTANCE_ID; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.METHOD; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.OPERATION_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.PER_CONNECTION_ERROR_COUNT; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.PROJECT_ID; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.RETRY_COUNT; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.SERVER_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.STATUS; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.STREAMING; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.TABLE; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.THROTTLING_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.ZONE; -import static io.opencensus.stats.Aggregation.Distribution; -import static io.opencensus.stats.Aggregation.Sum; - -import com.google.common.collect.ImmutableList; -import io.opencensus.stats.Aggregation; -import io.opencensus.stats.BucketBoundaries; -import io.opencensus.stats.View; - -/** Create built-in metrics views under bigtable.googleapis.com/internal/client namespace */ -class BuiltinViewConstants { - private static final Aggregation AGGREGATION_WITH_MILLIS_HISTOGRAM = - 
Distribution.create( - BucketBoundaries.create( - ImmutableList.of( - 0.0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, - 13.0, 16.0, 20.0, 25.0, 30.0, 40.0, 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, - 250.0, 300.0, 400.0, 500.0, 650.0, 800.0, 1000.0, 2000.0, 5000.0, 10000.0, - 20000.0, 50000.0, 100000.0))); - - private static final Aggregation AGGREGATION_RETRY_COUNT = - Distribution.create( - BucketBoundaries.create( - ImmutableList.of( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 15.0, 20.0, 30.0, 40.0, 50.0, - 100.0))); - - private static final Aggregation PER_CONNECTION_ERROR_COUNT_AGGREGATION = - Distribution.create( - BucketBoundaries.create( - ImmutableList.of( - 1.0, - 2.0, - 4.0, - 8.0, - 16.0, - 32.0, - 64.0, - 125.0, - 250.0, - 500.0, - 1_000.0, - 2_000.0, - 4_000.0, - 8_000.0, - 16_000.0, - 32_000.0, - 64_000.0, - 128_000.0, - 250_000.0, - 500_000.0, - 1_000_000.0))); - - private static final Aggregation AGGREGATION_COUNT = Sum.create(); - - static final View OPERATION_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/operation_latencies"), - "Total time until final operation success or failure, including retries and backoff.", - OPERATION_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STREAMING, - STATUS, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View ATTEMPT_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/attempt_latencies"), - "Client observed latency per RPC attempt.", - ATTEMPT_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STREAMING, - STATUS, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View RETRY_COUNT_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/retry_count"), - "The number of additional RPCs sent after the initial attempt.", - RETRY_COUNT, - AGGREGATION_COUNT, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STATUS, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View FIRST_RESPONSE_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/first_response_latencies"), - "Latency from operation start until the response headers were received. The publishing of the measurement will be delayed until the attempt response has been received.", - FIRST_RESPONSE_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STATUS, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View SERVER_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/server_latencies"), - "The latency measured from the moment that the RPC entered the Google data center until the RPC was completed.", - SERVER_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STATUS, - STREAMING, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View CONNECTIVITY_ERROR_COUNT_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/connectivity_error_count"), - "Number of requests that failed to reach the Google datacenter. 
(Requests without google response headers).", - CONNECTIVITY_ERROR_COUNT, - AGGREGATION_COUNT, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STATUS, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View APPLICATION_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/application_latencies"), - "The latency of the client application consuming available response data.", - APPLICATION_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, INSTANCE_ID, APP_PROFILE, METHOD, CLIENT_NAME, CLUSTER, ZONE, TABLE)); - - static final View THROTTLING_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/throttling_latencies"), - "The artificial latency introduced by the client to limit the number of outstanding requests. The publishing of the measurement will be delayed until the attempt trailers have been received.", - THROTTLING_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, INSTANCE_ID, APP_PROFILE, METHOD, CLIENT_NAME, CLUSTER, ZONE, TABLE)); - - static final View PER_CONNECTION_ERROR_COUNT_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/per_connection_error_count"), - "Distribution of counts of channels per 'error count per minute'.", - PER_CONNECTION_ERROR_COUNT, - PER_CONNECTION_ERROR_COUNT_AGGREGATION, - ImmutableList.of(PROJECT_ID, INSTANCE_ID, APP_PROFILE, CLIENT_NAME)); -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViews.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViews.java deleted file mode 100644 index 2b91ee60c3..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViews.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import com.google.api.core.InternalApi; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableSet; -import io.opencensus.stats.Stats; -import io.opencensus.stats.View; -import io.opencensus.stats.ViewManager; - -/** For registering built-in metric views */ -@InternalApi("For internal use only") -public class BuiltinViews { - - @VisibleForTesting - static final ImmutableSet BIGTABLE_BUILTIN_VIEWS = - ImmutableSet.of( - BuiltinViewConstants.OPERATION_LATENCIES_VIEW, - BuiltinViewConstants.ATTEMPT_LATENCIES_VIEW, - BuiltinViewConstants.RETRY_COUNT_VIEW, - BuiltinViewConstants.FIRST_RESPONSE_LATENCIES_VIEW, - BuiltinViewConstants.SERVER_LATENCIES_VIEW, - BuiltinViewConstants.CONNECTIVITY_ERROR_COUNT_VIEW, - BuiltinViewConstants.APPLICATION_LATENCIES_VIEW, - BuiltinViewConstants.THROTTLING_LATENCIES_VIEW); - // We store views that don't use the Bigtable schema and need different tags in a separate set to - // simplify testing. 
- static final ImmutableSet NON_BIGTABLE_BUILTIN_VIEWS = - ImmutableSet.of(BuiltinViewConstants.PER_CONNECTION_ERROR_COUNT_VIEW); - - @VisibleForTesting - void registerPrivateViews(ViewManager viewManager) { - for (View view : BIGTABLE_BUILTIN_VIEWS) { - viewManager.registerView(view); - } - for (View view : NON_BIGTABLE_BUILTIN_VIEWS) { - viewManager.registerView(view); - } - } - - public static void registerBigtableBuiltinViews() { - ViewManager viewManager = Stats.getViewManager(); - for (View view : BIGTABLE_BUILTIN_VIEWS) { - viewManager.registerView(view); - } - for (View view : NON_BIGTABLE_BUILTIN_VIEWS) { - viewManager.registerView(view); - } - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils.java deleted file mode 100644 index 8c84850f6a..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2024 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import com.google.common.annotations.VisibleForTesting; -import io.opencensus.contrib.resource.util.CloudResource; -import io.opencensus.contrib.resource.util.ContainerResource; -import io.opencensus.contrib.resource.util.HostResource; -import io.opencensus.contrib.resource.util.ResourceUtils; -import io.opencensus.resource.Resource; -import java.util.Objects; - -/** A class for extracting details about consumer environments (GCE and GKE) for metrics. */ -class ConsumerEnvironmentUtils { - - private static ResourceUtilsWrapper resourceUtilsWrapper = new ResourceUtilsWrapper(); - - @VisibleForTesting - public static void setResourceUtilsWrapper(ResourceUtilsWrapper newResourceUtilsWrapper) { - resourceUtilsWrapper = newResourceUtilsWrapper; - } - - public static boolean isEnvGce() { - Resource resource = resourceUtilsWrapper.detectOpenCensusResource(); - return Objects.equals(resource.getType(), HostResource.TYPE) - && Objects.equals( - resource.getLabels().get(CloudResource.PROVIDER_KEY), CloudResource.PROVIDER_GCP); - } - - public static boolean isEnvGke() { - Resource resource = resourceUtilsWrapper.detectOpenCensusResource(); - return Objects.equals(resource.getType(), ContainerResource.TYPE) - && Objects.equals( - resource.getLabels().get(CloudResource.PROVIDER_KEY), CloudResource.PROVIDER_GCP); - } - - // We wrap the static ResourceUtils.detectResource() method in a non-static method for mocking. 
- @VisibleForTesting - public static class ResourceUtilsWrapper { - public Resource detectOpenCensusResource() { - return ResourceUtils.detectResource(); - } - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapper.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapper.java deleted file mode 100644 index 6bf0988b91..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapper.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import com.google.api.core.InternalApi; -import com.google.api.gax.tracing.ApiTracerFactory.OperationType; -import com.google.api.gax.tracing.SpanName; -import io.opencensus.stats.MeasureMap; -import io.opencensus.stats.StatsRecorder; -import io.opencensus.tags.TagContext; -import io.opencensus.tags.TagContextBuilder; -import io.opencensus.tags.TagKey; -import io.opencensus.tags.TagValue; -import io.opencensus.tags.Tagger; -import io.opencensus.tags.Tags; -import java.util.Map; - -/** A wrapper to record built-in metrics */ -@InternalApi("For internal use only") -public class StatsRecorderWrapper { - - private final OperationType operationType; - - private final Tagger tagger; - private final StatsRecorder statsRecorder; - private final TagContext parentContext; - private final SpanName spanName; - private final Map statsAttributes; - - private MeasureMap attemptMeasureMap; - private MeasureMap operationMeasureMap; - - public StatsRecorderWrapper( - OperationType operationType, - SpanName spanName, - Map statsAttributes, - StatsRecorder statsRecorder) { - this.operationType = operationType; - this.tagger = Tags.getTagger(); - this.statsRecorder = statsRecorder; - this.spanName = spanName; - this.parentContext = tagger.getCurrentTagContext(); - this.statsAttributes = statsAttributes; - - this.attemptMeasureMap = statsRecorder.newMeasureMap(); - this.operationMeasureMap = statsRecorder.newMeasureMap(); - } - - public void recordOperation(String status, String tableId, String zone, String cluster) { - TagContextBuilder tagCtx = - newTagContextBuilder(tableId, zone, cluster) - .putLocal(BuiltinMeasureConstants.STATUS, TagValue.create(status)); - - boolean isStreaming = operationType == OperationType.ServerStreaming; - tagCtx.putLocal( - BuiltinMeasureConstants.STREAMING, TagValue.create(Boolean.toString(isStreaming))); - - operationMeasureMap.record(tagCtx.build()); - // Reinitialize a new map - operationMeasureMap = statsRecorder.newMeasureMap(); - } - - public void recordAttempt(String status, String tableId, String zone, String cluster) { - TagContextBuilder tagCtx = - newTagContextBuilder(tableId, zone, cluster) - .putLocal(BuiltinMeasureConstants.STATUS, TagValue.create(status)); - - boolean isStreaming = operationType == OperationType.ServerStreaming; - tagCtx.putLocal( - 
BuiltinMeasureConstants.STREAMING, TagValue.create(Boolean.toString(isStreaming))); - - attemptMeasureMap.record(tagCtx.build()); - // Reinitialize a new map - attemptMeasureMap = statsRecorder.newMeasureMap(); - } - - public void putOperationLatencies(long operationLatency) { - operationMeasureMap.put(BuiltinMeasureConstants.OPERATION_LATENCIES, operationLatency); - } - - public void putAttemptLatencies(long attemptLatency) { - attemptMeasureMap.put(BuiltinMeasureConstants.ATTEMPT_LATENCIES, attemptLatency); - } - - public void putRetryCount(int attemptCount) { - operationMeasureMap.put(BuiltinMeasureConstants.RETRY_COUNT, attemptCount); - } - - public void putApplicationLatencies(long applicationLatency) { - operationMeasureMap.put(BuiltinMeasureConstants.APPLICATION_LATENCIES, applicationLatency); - } - - public void putFirstResponseLatencies(long firstResponseLatency) { - operationMeasureMap.put(BuiltinMeasureConstants.FIRST_RESPONSE_LATENCIES, firstResponseLatency); - } - - public void putGfeLatencies(long serverLatency) { - attemptMeasureMap.put(BuiltinMeasureConstants.SERVER_LATENCIES, serverLatency); - } - - public void putGfeMissingHeaders(long connectivityErrors) { - attemptMeasureMap.put(BuiltinMeasureConstants.CONNECTIVITY_ERROR_COUNT, connectivityErrors); - } - - public void putClientBlockingLatencies(long clientBlockingLatency) { - operationMeasureMap.put(BuiltinMeasureConstants.THROTTLING_LATENCIES, clientBlockingLatency); - } - - private TagContextBuilder newTagContextBuilder(String tableId, String zone, String cluster) { - TagContextBuilder tagContextBuilder = - tagger - .toBuilder(parentContext) - .putLocal(BuiltinMeasureConstants.METHOD, TagValue.create(spanName.toString())) - .putLocal(BuiltinMeasureConstants.TABLE, TagValue.create(tableId)) - .putLocal(BuiltinMeasureConstants.ZONE, TagValue.create(zone)) - .putLocal(BuiltinMeasureConstants.CLUSTER, TagValue.create(cluster)); - for (Map.Entry entry : statsAttributes.entrySet()) { - tagContextBuilder.putLocal(TagKey.create(entry.getKey()), TagValue.create(entry.getValue())); - } - return tagContextBuilder; - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperForConnection.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperForConnection.java deleted file mode 100644 index 3c335d28bc..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperForConnection.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2024 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigtable.stats; - -import com.google.api.core.InternalApi; -import io.opencensus.stats.MeasureMap; -import io.opencensus.stats.StatsRecorder; -import io.opencensus.tags.TagContext; -import io.opencensus.tags.TagContextBuilder; -import io.opencensus.tags.TagKey; -import io.opencensus.tags.TagValue; -import io.opencensus.tags.Tagger; -import io.opencensus.tags.Tags; -import java.util.Map; - -/** A wrapper to record built-in metrics for connection metrics not tied to operations/RPCs. */ -@InternalApi("For internal use only") -public class StatsRecorderWrapperForConnection { - private final StatsRecorder statsRecorder; - private final TagContext tagContext; - private MeasureMap perConnectionErrorCountMeasureMap; - - public StatsRecorderWrapperForConnection( - Map statsAttributes, StatsRecorder statsRecorder) { - this.statsRecorder = statsRecorder; - - this.perConnectionErrorCountMeasureMap = statsRecorder.newMeasureMap(); - - Tagger tagger = Tags.getTagger(); - TagContextBuilder tagContextBuilder = tagger.toBuilder(tagger.getCurrentTagContext()); - for (Map.Entry entry : statsAttributes.entrySet()) { - tagContextBuilder.putLocal(TagKey.create(entry.getKey()), TagValue.create(entry.getValue())); - } - this.tagContext = tagContextBuilder.build(); - } - - public void putAndRecordPerConnectionErrorCount(long errorCount) { - perConnectionErrorCountMeasureMap.put( - BuiltinMeasureConstants.PER_CONNECTION_ERROR_COUNT, errorCount); - - perConnectionErrorCountMeasureMap.record(tagContext); - perConnectionErrorCountMeasureMap = statsRecorder.newMeasureMap(); - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsWrapper.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsWrapper.java deleted file mode 100644 index fc6a072d01..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsWrapper.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import static com.google.api.gax.tracing.ApiTracerFactory.OperationType; - -import com.google.api.core.InternalApi; -import com.google.api.gax.tracing.SpanName; -import io.opencensus.stats.Stats; -import io.opencensus.stats.View; -import io.opencensus.tags.TagKey; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -/** - * Wrapper class for accessing opencensus. We use a shaded version of opencensus to avoid polluting - * the global opencensus namespace. And this provides a facade that will not be relocated. 
- */ -@InternalApi("For internal use only") -public class StatsWrapper { - public static StatsRecorderWrapper createRecorder( - OperationType operationType, SpanName spanName, Map statsAttributes) { - return new StatsRecorderWrapper( - operationType, spanName, statsAttributes, Stats.getStatsRecorder()); - } - - public static StatsRecorderWrapperForConnection createRecorderForConnection( - Map statsAttributes) { - return new StatsRecorderWrapperForConnection(statsAttributes, Stats.getStatsRecorder()); - } - - // This is used in integration tests to get the tag value strings from view manager because Stats - // is relocated to com.google.bigtable.veneer.repackaged.io.opencensus. - @InternalApi("Visible for testing") - public static List getOperationLatencyViewTagValueStrings() { - return Stats.getViewManager().getView(BuiltinViewConstants.OPERATION_LATENCIES_VIEW.getName()) - .getAggregationMap().entrySet().stream() - .map(Map.Entry::getKey) - .flatMap(x -> x.stream()) - .map(x -> x.asString()) - .collect(Collectors.toCollection(ArrayList::new)); - } - - // A workaround to run ITBuiltinViewConstantsTest as integration test. Integration test runs after - // the packaging step. Opencensus classes will be relocated when they are packaged but the - // integration test files will not be. So the integration tests can't reference any transitive - // dependencies that have been relocated. - static Map> getBigtableViewToTagMap() { - Map> map = new HashMap<>(); - for (View view : BuiltinViews.BIGTABLE_BUILTIN_VIEWS) { - List tagKeys = view.getColumns(); - map.put( - view.getName().asString(), - tagKeys.stream().map(tagKey -> tagKey.getName()).collect(Collectors.toList())); - } - return map; - } -} diff --git a/google-cloud-bigtable-stats/src/main/resources/META-INF/license/apache2-LICENSE.txt b/google-cloud-bigtable-stats/src/main/resources/META-INF/license/apache2-LICENSE.txt deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/google-cloud-bigtable-stats/src/main/resources/META-INF/license/apache2-LICENSE.txt +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporterTest.java b/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporterTest.java deleted file mode 100644 index e72b54f0bd..0000000000 --- a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporterTest.java +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import static com.google.common.truth.Truth.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.google.api.MonitoredResource; -import com.google.api.gax.rpc.UnaryCallable; -import com.google.cloud.monitoring.v3.MetricServiceClient; -import com.google.cloud.monitoring.v3.stub.MetricServiceStub; -import com.google.common.collect.ImmutableMap; -import com.google.monitoring.v3.CreateTimeSeriesRequest; -import com.google.protobuf.Empty; -import io.opencensus.common.Timestamp; -import io.opencensus.contrib.resource.util.CloudResource; -import io.opencensus.contrib.resource.util.ContainerResource; -import io.opencensus.contrib.resource.util.HostResource; -import io.opencensus.metrics.LabelKey; -import io.opencensus.metrics.LabelValue; -import io.opencensus.metrics.export.Metric; -import io.opencensus.metrics.export.MetricDescriptor; -import io.opencensus.metrics.export.Point; -import io.opencensus.metrics.export.TimeSeries; -import io.opencensus.metrics.export.Value; -import io.opencensus.resource.Resource; -import java.util.Arrays; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.mockito.ArgumentCaptor; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.junit.MockitoJUnit; -import org.mockito.junit.MockitoRule; - -@RunWith(JUnit4.class) -public class BigtableCreateTimeSeriesExporterTest { - - private static final String bigtableProjectId = "fake-bigtable-project"; - private static final String bigtableInstanceId = "fake-bigtable-instance"; - private static final String appProfileId = "default"; - private static final String tableId = "fake-table"; - private static final String bigtableZone = "us-east-1"; - private static final String bigtableCluster = "cluster-1"; - private static final String clientName = "client-name"; - private static final String gceProjectId = "fake-gce-project"; - private static final String gkeProjectId = "fake-gke-project"; - - @Rule public final MockitoRule mockitoRule = MockitoJUnit.rule(); - - @Mock private MetricServiceStub mockMetricServiceStub; - private MetricServiceClient fakeMetricServiceClient; - - @Before - public void setUp() { - - fakeMetricServiceClient = new FakeMetricServiceClient(mockMetricServiceStub); - } - - @After - public void tearDown() {} - - @Test - public void testTimeSeriesForMetricWithBigtableResource() { - BigtableCreateTimeSeriesExporter exporter = - new BigtableCreateTimeSeriesExporter(fakeMetricServiceClient, null); - ArgumentCaptor argumentCaptor = - ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); - - UnaryCallable mockCallable = mock(UnaryCallable.class); - when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); - when(mockCallable.call(argumentCaptor.capture())).thenReturn(Empty.getDefaultInstance()); - - double fakeValue = 10.0; - Metric fakeMetric = - Metric.create( - MetricDescriptor.create( - "bigtable/test", - "description", - "ms", - MetricDescriptor.Type.CUMULATIVE_DOUBLE, - Arrays.asList( - LabelKey.create(BuiltinMeasureConstants.PROJECT_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.INSTANCE_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.TABLE.getName(), ""), - 
LabelKey.create(BuiltinMeasureConstants.CLUSTER.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.ZONE.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.APP_PROFILE.getName(), ""))), - Arrays.asList( - TimeSeries.create( - Arrays.asList( - LabelValue.create(bigtableProjectId), - LabelValue.create(bigtableInstanceId), - LabelValue.create(tableId), - LabelValue.create(bigtableCluster), - LabelValue.create(bigtableZone), - LabelValue.create(appProfileId)), - Arrays.asList( - Point.create( - Value.doubleValue(fakeValue), - Timestamp.fromMillis(System.currentTimeMillis()))), - Timestamp.fromMillis(System.currentTimeMillis())))); - - exporter.export(Arrays.asList(fakeMetric)); - - CreateTimeSeriesRequest request = argumentCaptor.getValue(); - - assertThat(request.getName()).isEqualTo("projects/" + bigtableProjectId); - assertThat(request.getTimeSeriesList()).hasSize(1); - - com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0); - - assertThat(timeSeries.getResource().getLabelsMap()) - .containsExactly( - BuiltinMeasureConstants.PROJECT_ID.getName(), bigtableProjectId, - BuiltinMeasureConstants.INSTANCE_ID.getName(), bigtableInstanceId, - BuiltinMeasureConstants.TABLE.getName(), tableId, - BuiltinMeasureConstants.CLUSTER.getName(), bigtableCluster, - BuiltinMeasureConstants.ZONE.getName(), bigtableZone); - - assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.APP_PROFILE.getName(), appProfileId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsKey(BuiltinMeasureConstants.CLIENT_UID.getName()); - - assertThat(timeSeries.getPoints(0).getValue().getDoubleValue()).isEqualTo(fakeValue); - } - - @Test - public void testTimeSeriesForMetricWithGceResource() { - BigtableCreateTimeSeriesExporter exporter = - new BigtableCreateTimeSeriesExporter( - fakeMetricServiceClient, - MonitoredResource.newBuilder() - .setType(BigtableStackdriverExportUtils.GCE_RESOURCE_TYPE) - .putLabels(BigtableStackdriverExportUtils.GCE_OR_GKE_PROJECT_ID_KEY, gceProjectId) - .putLabels("another-gce-key", "another-gce-value") - .build()); - ArgumentCaptor argumentCaptor = - ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); - - UnaryCallable mockCallable = mock(UnaryCallable.class); - when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); - when(mockCallable.call(argumentCaptor.capture())).thenReturn(Empty.getDefaultInstance()); - - ConsumerEnvironmentUtils.ResourceUtilsWrapper resourceUtilsWrapperMock = - Mockito.mock(ConsumerEnvironmentUtils.ResourceUtilsWrapper.class); - ConsumerEnvironmentUtils.setResourceUtilsWrapper(resourceUtilsWrapperMock); - Mockito.when(resourceUtilsWrapperMock.detectOpenCensusResource()) - .thenReturn( - Resource.create( - HostResource.TYPE, - ImmutableMap.of(CloudResource.PROVIDER_KEY, CloudResource.PROVIDER_GCP))); - - double fakeValue = 10.0; - Metric fakeMetric = - Metric.create( - MetricDescriptor.create( - "bigtable.googleapis.com/internal/client/per_connection_error_count", - "description", - "ms", - MetricDescriptor.Type.CUMULATIVE_DOUBLE, - Arrays.asList( - LabelKey.create(BuiltinMeasureConstants.PROJECT_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.INSTANCE_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.APP_PROFILE.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.CLIENT_NAME.getName(), ""))), - Arrays.asList( - TimeSeries.create( - Arrays.asList( - 
LabelValue.create(bigtableProjectId), - LabelValue.create(bigtableInstanceId), - LabelValue.create(appProfileId), - LabelValue.create(clientName)), - Arrays.asList( - Point.create( - Value.doubleValue(fakeValue), - Timestamp.fromMillis(System.currentTimeMillis()))), - Timestamp.fromMillis(System.currentTimeMillis())))); - - exporter.export(Arrays.asList(fakeMetric)); - - CreateTimeSeriesRequest request = argumentCaptor.getValue(); - - assertThat(request.getName()).isEqualTo("projects/" + gceProjectId); - assertThat(request.getTimeSeriesList()).hasSize(1); - - com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0); - - assertThat(timeSeries.getResource().getLabelsMap()) - .containsExactly( - BigtableStackdriverExportUtils.GCE_OR_GKE_PROJECT_ID_KEY, - gceProjectId, - "another-gce-key", - "another-gce-value"); - - assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.PROJECT_ID.getName(), bigtableProjectId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.INSTANCE_ID.getName(), bigtableInstanceId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.APP_PROFILE.getName(), appProfileId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.CLIENT_NAME.getName(), clientName); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsKey(BuiltinMeasureConstants.CLIENT_UID.getName()); - - assertThat(timeSeries.getPoints(0).getValue().getDoubleValue()).isEqualTo(fakeValue); - } - - @Test - public void testTimeSeriesForMetricWithGkeResource() { - BigtableCreateTimeSeriesExporter exporter = - new BigtableCreateTimeSeriesExporter( - fakeMetricServiceClient, - MonitoredResource.newBuilder() - .setType(BigtableStackdriverExportUtils.GKE_RESOURCE_TYPE) - .putLabels(BigtableStackdriverExportUtils.GCE_OR_GKE_PROJECT_ID_KEY, gkeProjectId) - .putLabels("another-gke-key", "another-gke-value") - .build()); - ArgumentCaptor argumentCaptor = - ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); - - UnaryCallable mockCallable = mock(UnaryCallable.class); - when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); - when(mockCallable.call(argumentCaptor.capture())).thenReturn(Empty.getDefaultInstance()); - - ConsumerEnvironmentUtils.ResourceUtilsWrapper resourceUtilsWrapperMock = - Mockito.mock(ConsumerEnvironmentUtils.ResourceUtilsWrapper.class); - ConsumerEnvironmentUtils.setResourceUtilsWrapper(resourceUtilsWrapperMock); - - Mockito.when(resourceUtilsWrapperMock.detectOpenCensusResource()) - .thenReturn( - Resource.create( - ContainerResource.TYPE, - ImmutableMap.of(CloudResource.PROVIDER_KEY, CloudResource.PROVIDER_GCP))); - - double fakeValue = 10.0; - Metric fakeMetric = - Metric.create( - MetricDescriptor.create( - "bigtable.googleapis.com/internal/client/per_connection_error_count", - "description", - "ms", - MetricDescriptor.Type.CUMULATIVE_DOUBLE, - Arrays.asList( - LabelKey.create(BuiltinMeasureConstants.PROJECT_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.INSTANCE_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.APP_PROFILE.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.CLIENT_NAME.getName(), ""))), - Arrays.asList( - TimeSeries.create( - Arrays.asList( - LabelValue.create(bigtableProjectId), - LabelValue.create(bigtableInstanceId), - 
LabelValue.create(appProfileId), - LabelValue.create(clientName)), - Arrays.asList( - Point.create( - Value.doubleValue(fakeValue), - Timestamp.fromMillis(System.currentTimeMillis()))), - Timestamp.fromMillis(System.currentTimeMillis())))); - - exporter.export(Arrays.asList(fakeMetric)); - - CreateTimeSeriesRequest request = argumentCaptor.getValue(); - - assertThat(request.getName()).isEqualTo("projects/" + gkeProjectId); - assertThat(request.getTimeSeriesList()).hasSize(1); - - com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0); - - assertThat(timeSeries.getResource().getLabelsMap()) - .containsExactly( - BigtableStackdriverExportUtils.GCE_OR_GKE_PROJECT_ID_KEY, - gkeProjectId, - "another-gke-key", - "another-gke-value"); - - assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.PROJECT_ID.getName(), bigtableProjectId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.INSTANCE_ID.getName(), bigtableInstanceId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.APP_PROFILE.getName(), appProfileId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.CLIENT_NAME.getName(), clientName); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsKey(BuiltinMeasureConstants.CLIENT_UID.getName()); - - assertThat(timeSeries.getPoints(0).getValue().getDoubleValue()).isEqualTo(fakeValue); - } - - private class FakeMetricServiceClient extends MetricServiceClient { - - protected FakeMetricServiceClient(MetricServiceStub stub) { - super(stub); - } - } -} diff --git a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/ITBuiltinViewConstantsTest.java b/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/ITBuiltinViewConstantsTest.java deleted file mode 100644 index c2dcc2a602..0000000000 --- a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/ITBuiltinViewConstantsTest.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigtable.stats; - -import static com.google.common.truth.Truth.assertWithMessage; - -import java.util.List; -import java.util.Map; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) -public class ITBuiltinViewConstantsTest { - @Test - public void testBasicTagsExistForAllViews() { - Map> viewToTagMap = StatsWrapper.getBigtableViewToTagMap(); - for (String view : viewToTagMap.keySet()) { - assertWithMessage(view + " should have all basic tags") - .that(viewToTagMap.get(view)) - .containsAtLeast( - "project_id", "instance", "app_profile", "method", "zone", "cluster", "table"); - } - } -} diff --git a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperTest.java b/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperTest.java deleted file mode 100644 index 829202510c..0000000000 --- a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperTest.java +++ /dev/null @@ -1,513 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import static com.google.common.truth.Truth.assertThat; - -import com.google.api.gax.tracing.ApiTracerFactory; -import com.google.api.gax.tracing.SpanName; -import com.google.common.collect.ImmutableMap; -import io.opencensus.impl.stats.StatsComponentImpl; -import io.opencensus.stats.AggregationData; -import io.opencensus.stats.StatsComponent; -import io.opencensus.stats.View; -import io.opencensus.stats.ViewData; -import io.opencensus.stats.ViewManager; -import io.opencensus.tags.TagKey; -import io.opencensus.tags.TagValue; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -// Can only be run as a unit test. Opencensus classes will be relocated when they are packaged but -// the integration test files will not be. So the integration tests can't reference any transitive -// dependencies that have been relocated. To work around this, we'll have to move all the reference -// to opencensus to StatsWrapper. 
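The workaround described in the comment above is no longer needed once metrics are recorded through OpenTelemetry: the replacement tests in this patch depend on the opentelemetry-sdk-testing artifact (added to google-cloud-bigtable/pom.xml in test scope) and can read recorded metrics in-process instead of going through a relocated OpenCensus view manager. The sketch below is illustrative only and is not code from this patch; it assumes the standard InMemoryMetricReader API from opentelemetry-sdk-testing, and the meter and instrument names are made up.

import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.sdk.OpenTelemetrySdk;
import io.opentelemetry.sdk.metrics.SdkMeterProvider;
import io.opentelemetry.sdk.metrics.data.MetricData;
import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
import java.util.Collection;

public class InMemoryMetricsSketch {
  public static void main(String[] args) {
    // The reader keeps recorded metrics in memory, so a test can assert on them directly
    // without any shading/relocation concerns and without calling Cloud Monitoring.
    InMemoryMetricReader reader = InMemoryMetricReader.create();
    SdkMeterProvider meterProvider =
        SdkMeterProvider.builder().registerMetricReader(reader).build();
    OpenTelemetry openTelemetry =
        OpenTelemetrySdk.builder().setMeterProvider(meterProvider).build();

    // Record a sample measurement the way instrumented client code would.
    openTelemetry
        .getMeter("bigtable-sketch") // meter name is made up for illustration
        .counterBuilder("attempt_count") // instrument name is made up for illustration
        .build()
        .add(1);

    // Pull everything recorded so far and inspect it.
    Collection<MetricData> metrics = reader.collectAllMetrics();
    metrics.forEach(m -> System.out.println(m.getName() + " -> " + m.getData()));
  }
}

Synchronous collection via collectAllMetrics() also avoids the Thread.sleep(...) waits that the deleted OpenCensus tests below resort to.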
-@RunWith(JUnit4.class) -public class StatsRecorderWrapperTest { - - private final String PROJECT_ID = "fake-project"; - private final String INSTANCE_ID = "fake-instance"; - private final String APP_PROFILE_ID = "fake-app-profile"; - - private final String TABLE_ID = "fake-table-id"; - private final String ZONE = "fake-zone"; - private final String CLUSTER = "fake-cluster"; - private final String CLIENT_AND_VERSION = "bigtable-java/fake-version"; - - private final StatsComponent statsComponent = new StatsComponentImpl(); - - @Before - public void setup() { - BuiltinViews views = new BuiltinViews(); - views.registerPrivateViews(statsComponent.getViewManager()); - } - - @Test - public void testStreamingOperation() throws InterruptedException { - StatsRecorderWrapper recorderWrapper = - new StatsRecorderWrapper( - ApiTracerFactory.OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - ImmutableMap.of( - BuiltinMeasureConstants.PROJECT_ID.getName(), - PROJECT_ID, - BuiltinMeasureConstants.INSTANCE_ID.getName(), - INSTANCE_ID, - BuiltinMeasureConstants.APP_PROFILE.getName(), - APP_PROFILE_ID, - BuiltinMeasureConstants.CLIENT_NAME.getName(), - CLIENT_AND_VERSION), - statsComponent.getStatsRecorder()); - - long operationLatency = 1234; - int attemptCount = 2; - long attemptLatency = 56; - long serverLatency = 78; - long applicationLatency = 901; - long connectivityErrorCount = 15; - long throttlingLatency = 50; - long firstResponseLatency = 90; - - recorderWrapper.putOperationLatencies(operationLatency); - recorderWrapper.putRetryCount(attemptCount); - recorderWrapper.putAttemptLatencies(attemptLatency); - recorderWrapper.putApplicationLatencies(applicationLatency); - recorderWrapper.putGfeLatencies(serverLatency); - recorderWrapper.putGfeMissingHeaders(connectivityErrorCount); - recorderWrapper.putFirstResponseLatencies(firstResponseLatency); - recorderWrapper.putClientBlockingLatencies(throttlingLatency); - - recorderWrapper.recordOperation("OK", TABLE_ID, ZONE, CLUSTER); - recorderWrapper.recordAttempt("OK", TABLE_ID, ZONE, CLUSTER); - - Thread.sleep(100); - - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.OPERATION_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, "OK", - BuiltinMeasureConstants.TABLE, TABLE_ID, - BuiltinMeasureConstants.ZONE, ZONE, - BuiltinMeasureConstants.CLUSTER, CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, "true"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(operationLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.ATTEMPT_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "true"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(attemptLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.RETRY_COUNT_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, 
- BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(attemptCount); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.SERVER_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "true", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(serverLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.APPLICATION_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "true"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(applicationLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.CONNECTIVITY_ERROR_COUNT_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(connectivityErrorCount); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.THROTTLING_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, "Bigtable.ReadRows", - BuiltinMeasureConstants.TABLE, TABLE_ID, - BuiltinMeasureConstants.ZONE, ZONE, - BuiltinMeasureConstants.CLUSTER, CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(throttlingLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.FIRST_RESPONSE_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(firstResponseLatency); - } - - @Test - public void testUnaryOperations() throws InterruptedException { - StatsRecorderWrapper recorderWrapper = - new StatsRecorderWrapper( - ApiTracerFactory.OperationType.Unary, - SpanName.of("Bigtable", "MutateRow"), - ImmutableMap.of( - BuiltinMeasureConstants.PROJECT_ID.getName(), PROJECT_ID, - BuiltinMeasureConstants.INSTANCE_ID.getName(), INSTANCE_ID, - BuiltinMeasureConstants.APP_PROFILE.getName(), APP_PROFILE_ID, - BuiltinMeasureConstants.CLIENT_NAME.getName(), CLIENT_AND_VERSION), - statsComponent.getStatsRecorder()); - - long operationLatency = 1234; - int attemptCount = 2; - long attemptLatency = 56; - long serverLatency = 78; - long applicationLatency = 901; - long connectivityErrorCount = 15; - 
long throttlingLatency = 50; - long firstResponseLatency = 90; - - recorderWrapper.putOperationLatencies(operationLatency); - recorderWrapper.putRetryCount(attemptCount); - recorderWrapper.putAttemptLatencies(attemptLatency); - recorderWrapper.putApplicationLatencies(applicationLatency); - recorderWrapper.putGfeLatencies(serverLatency); - recorderWrapper.putGfeMissingHeaders(connectivityErrorCount); - recorderWrapper.putFirstResponseLatencies(firstResponseLatency); - recorderWrapper.putClientBlockingLatencies(throttlingLatency); - - recorderWrapper.recordOperation("UNAVAILABLE", TABLE_ID, ZONE, CLUSTER); - recorderWrapper.recordAttempt("UNAVAILABLE", TABLE_ID, ZONE, CLUSTER); - - Thread.sleep(100); - - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.OPERATION_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "false"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(operationLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.ATTEMPT_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "false"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(attemptLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.RETRY_COUNT_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(attemptCount); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.SERVER_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "false", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(serverLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.APPLICATION_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "false"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(applicationLatency); - assertThat( - 
getAggregationValueAsLong( - BuiltinViewConstants.CONNECTIVITY_ERROR_COUNT_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(connectivityErrorCount); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.THROTTLING_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, "Bigtable.MutateRow", - BuiltinMeasureConstants.TABLE, TABLE_ID, - BuiltinMeasureConstants.ZONE, ZONE, - BuiltinMeasureConstants.CLUSTER, CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(throttlingLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.FIRST_RESPONSE_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(firstResponseLatency); - } - - long getAggregationValueAsLong( - View view, - ImmutableMap tags, - String projectId, - String instanceId, - String appProfileId, - ViewManager viewManager) { - ViewData viewData = viewManager.getView(view.getName()); - Map, AggregationData> aggregationMap = - Objects.requireNonNull(viewData).getAggregationMap(); - - List tagValues = new ArrayList<>(); - - for (TagKey column : view.getColumns()) { - if (BuiltinMeasureConstants.PROJECT_ID == column) { - tagValues.add(TagValue.create(projectId)); - } else if (BuiltinMeasureConstants.INSTANCE_ID == column) { - tagValues.add(TagValue.create(instanceId)); - } else if (BuiltinMeasureConstants.APP_PROFILE == column) { - tagValues.add(TagValue.create(appProfileId)); - } else { - tagValues.add(TagValue.create(tags.get(column))); - } - } - - AggregationData aggregationData = aggregationMap.get(tagValues); - - return aggregationData.match( - arg -> (long) arg.getSum(), - AggregationData.SumDataLong::getSum, - arg -> arg.getCount(), - arg -> (long) arg.getMean(), - arg -> (long) arg.getLastValue(), - AggregationData.LastValueDataLong::getLastValue, - arg -> { - throw new UnsupportedOperationException(); - }); - } -} diff --git a/google-cloud-bigtable/clirr-ignored-differences.xml b/google-cloud-bigtable/clirr-ignored-differences.xml index 7ac7946561..034168c2a1 100644 --- a/google-cloud-bigtable/clirr-ignored-differences.xml +++ b/google-cloud-bigtable/clirr-ignored-differences.xml @@ -163,6 +163,12 @@ 8001 com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerBatchedUnaryCallable + + + 7004 + com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory + * + 6001 @@ -188,6 +194,11 @@ * + + 7004 + com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker + * + 7012 com/google/cloud/bigtable/data/v2/models/MutationApi diff --git a/google-cloud-bigtable/pom.xml b/google-cloud-bigtable/pom.xml index b36f9d61bd..dda5e2e633 100644 --- a/google-cloud-bigtable/pom.xml +++ b/google-cloud-bigtable/pom.xml 
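The pom.xml hunks that follow drop the google-cloud-bigtable-stats dependency (and its shaded OpenCensus exclusions), add the OpenTelemetry API/SDK artifacts plus the com.google.cloud.opentelemetry detector-resources-support artifact, add opentelemetry-sdk-testing in test scope, and move google-cloud-monitoring and proto-google-cloud-monitoring-v3 out of runtime scope into the main dependency list for the new exporter. With those dependencies in place, client-side metrics are controlled through the MetricsProvider setting introduced later in this patch rather than through OpenCensus view registration. The sketch below is illustrative, not code from the patch; it assumes the Builder#setMetricsProvider and NoopMetricsProvider#INSTANCE surface this PR adds, and the project and instance ids are placeholders.

import com.google.cloud.bigtable.data.v2.BigtableDataClient;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider;
import java.io.IOException;

public class MetricsProviderSketch {
  public static void main(String[] args) throws IOException {
    // Built-in metrics are exported by default (DefaultMetricsProvider.INSTANCE), so opting out
    // is now an explicit settings choice rather than "never calling enableBuiltinMetrics()".
    BigtableDataSettings settings =
        BigtableDataSettings.newBuilder()
            .setProjectId("my-project") // placeholder
            .setInstanceId("my-instance") // placeholder
            .setMetricsProvider(NoopMetricsProvider.INSTANCE) // disable client-side metrics
            .build();

    try (BigtableDataClient client = BigtableDataClient.create(settings)) {
      // Use the client as usual; no exporter registration is required either way.
    }
  }
}

Plugging in a user-managed OpenTelemetry instance instead goes through CustomOpenTelemetryMetricsProvider, as the setMetricsProvider javadoc added in BigtableDataSettings describes.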
@@ -64,19 +64,6 @@ - - com.google.cloud - google-cloud-bigtable-stats - - - - io.opencensus - * - - - com.google.api @@ -229,6 +216,41 @@ threetenbp + + + io.opentelemetry + opentelemetry-api + + + io.opentelemetry + opentelemetry-sdk + + + io.opentelemetry + opentelemetry-sdk-metrics + + + io.opentelemetry + opentelemetry-sdk-common + + + com.google.cloud.opentelemetry + detector-resources-support + + + io.opentelemetry + opentelemetry-sdk-testing + test + + + com.google.cloud + google-cloud-monitoring + + + com.google.api.grpc + proto-google-cloud-monitoring-v3 + + com.google.api @@ -272,23 +294,6 @@ - - com.google.cloud - google-cloud-monitoring - - - - io.perfmark - perfmark-api - - - runtime - - - com.google.api.grpc - proto-google-cloud-monitoring-v3 - runtime - com.google.truth truth diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java index c35500a189..45ec5af814 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java @@ -19,6 +19,7 @@ import com.google.api.gax.core.BackgroundResource; import com.google.api.gax.rpc.ClientContext; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; +import io.opentelemetry.api.OpenTelemetry; import java.io.IOException; import javax.annotation.Nonnull; @@ -64,6 +65,7 @@ public final class BigtableDataClientFactory implements AutoCloseable { private final BigtableDataSettings defaultSettings; private final ClientContext sharedClientContext; + private final OpenTelemetry openTelemetry; /** * Create a instance of this factory. 
@@ -75,13 +77,21 @@ public static BigtableDataClientFactory create(BigtableDataSettings defaultSetti throws IOException { ClientContext sharedClientContext = EnhancedBigtableStub.createClientContext(defaultSettings.getStubSettings()); - return new BigtableDataClientFactory(sharedClientContext, defaultSettings); + OpenTelemetry openTelemetry = + EnhancedBigtableStub.getOpenTelemetry( + defaultSettings.getProjectId(), + defaultSettings.getMetricsProvider(), + sharedClientContext.getCredentials()); + return new BigtableDataClientFactory(sharedClientContext, defaultSettings, openTelemetry); } private BigtableDataClientFactory( - ClientContext sharedClientContext, BigtableDataSettings defaultSettings) { + ClientContext sharedClientContext, + BigtableDataSettings defaultSettings, + OpenTelemetry openTelemetry) { this.sharedClientContext = sharedClientContext; this.defaultSettings = defaultSettings; + this.openTelemetry = openTelemetry; } /** @@ -112,7 +122,7 @@ public BigtableDataClient createDefault() { .toBuilder() .setTracerFactory( EnhancedBigtableStub.createBigtableTracerFactory( - defaultSettings.getStubSettings())) + defaultSettings.getStubSettings(), openTelemetry)) .build(); return BigtableDataClient.createWithClientContext(defaultSettings, clientContext); @@ -140,7 +150,8 @@ public BigtableDataClient createForAppProfile(@Nonnull String appProfileId) thro sharedClientContext .toBuilder() .setTracerFactory( - EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings())) + EnhancedBigtableStub.createBigtableTracerFactory( + settings.getStubSettings(), openTelemetry)) .build(); return BigtableDataClient.createWithClientContext(settings, clientContext); } @@ -168,7 +179,8 @@ public BigtableDataClient createForInstance(@Nonnull String projectId, @Nonnull sharedClientContext .toBuilder() .setTracerFactory( - EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings())) + EnhancedBigtableStub.createBigtableTracerFactory( + settings.getStubSettings(), openTelemetry)) .build(); return BigtableDataClient.createWithClientContext(settings, clientContext); @@ -197,7 +209,8 @@ public BigtableDataClient createForInstance( sharedClientContext .toBuilder() .setTracerFactory( - EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings())) + EnhancedBigtableStub.createBigtableTracerFactory( + settings.getStubSettings(), openTelemetry)) .build(); return BigtableDataClient.createWithClientContext(settings, clientContext); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java index 701a5e8e49..928159aa6d 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java @@ -25,19 +25,16 @@ import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; import com.google.api.gax.rpc.UnaryCallSettings; import com.google.auth.Credentials; -import com.google.auth.oauth2.GoogleCredentials; import com.google.cloud.bigtable.data.v2.models.Query; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.stub.BigtableBatchingCallSettings; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings; -import com.google.cloud.bigtable.stats.BigtableStackdriverStatsExporter; -import com.google.cloud.bigtable.stats.BuiltinViews; 
+import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider; import com.google.common.base.MoreObjects; import com.google.common.base.Strings; import io.grpc.ManagedChannelBuilder; import java.io.IOException; import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.logging.Logger; import javax.annotation.Nonnull; import javax.annotation.Nullable; @@ -77,7 +74,10 @@ public final class BigtableDataSettings { private static final Logger LOGGER = Logger.getLogger(BigtableDataSettings.class.getName()); private static final String BIGTABLE_EMULATOR_HOST_ENV_VAR = "BIGTABLE_EMULATOR_HOST"; - private static final AtomicBoolean BUILTIN_METRICS_REGISTERED = new AtomicBoolean(false); + // This is the legacy credential override used in the deprecated enableBuiltinMetrics method to + // override the default credentials set on the Bigtable client. Keeping it for backward + // compatibility. + @Deprecated @Nullable private static Credentials legacyMetricCredentialOverride; private final EnhancedBigtableStubSettings stubSettings; @@ -197,23 +197,34 @@ public static void enableGfeOpenCensusStats() { com.google.cloud.bigtable.data.v2.stub.metrics.RpcViews.registerBigtableClientGfeViews(); } - /** Register built in metrics. */ - public static void enableBuiltinMetrics() throws IOException { - if (BUILTIN_METRICS_REGISTERED.compareAndSet(false, true)) { - BuiltinViews.registerBigtableBuiltinViews(); - BigtableStackdriverStatsExporter.register(GoogleCredentials.getApplicationDefault()); - } - } + /** + * Register built in metrics. + * + * @deprecated This is a no-op that doesn't do anything. Builtin metrics are enabled by default + * now. Please refer to {@link + * BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)} on how to enable or + * disable built-in metrics. + */ + @Deprecated + public static void enableBuiltinMetrics() throws IOException {} /** * Register built in metrics with credentials. The credentials need to have metric write access * for all the projects you're publishing to. + * + * @deprecated This is a no-op that doesn't do anything. Builtin metrics are enabled by default + * now. Please refer {@link BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)} + * on how to enable or disable built-in metrics. */ + @Deprecated public static void enableBuiltinMetrics(Credentials credentials) throws IOException { - if (BUILTIN_METRICS_REGISTERED.compareAndSet(false, true)) { - BuiltinViews.registerBigtableBuiltinViews(); - BigtableStackdriverStatsExporter.register(credentials); - } + BigtableDataSettings.legacyMetricCredentialOverride = credentials; + } + + /** Get the metrics credentials if it's set by {@link #enableBuiltinMetrics(Credentials)}. */ + @InternalApi + public static Credentials getMetricsCredentials() { + return legacyMetricCredentialOverride; } /** Returns the target project id. */ @@ -278,6 +289,11 @@ public boolean isBulkMutationFlowControlEnabled() { return stubSettings.bulkMutateRowsSettings().isServerInitiatedFlowControlEnabled(); } + /** Gets the {@link MetricsProvider}. * */ + public MetricsProvider getMetricsProvider() { + return stubSettings.getMetricsProvider(); + } + /** Returns the underlying RPC settings. */ public EnhancedBigtableStubSettings getStubSettings() { return stubSettings; @@ -527,6 +543,30 @@ public boolean isBulkMutationFlowControlEnabled() { return stubSettings.bulkMutateRowsSettings().isServerInitiatedFlowControlEnabled(); } + /** + * Sets the {@link MetricsProvider}. + * + *

<p>By default, this is set to {@link + * com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider#INSTANCE} which will + * collect and export client side metrics. + * + * <p>To disable client side metrics, set it to {@link + * com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider#INSTANCE}. + * + * <p>
To use a custom OpenTelemetry instance, refer to {@link + * com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider} on how to + * set it up. + */ + public Builder setMetricsProvider(MetricsProvider metricsProvider) { + stubSettings.setMetricsProvider(metricsProvider); + return this; + } + + /** Gets the {@link MetricsProvider}. */ + public MetricsProvider getMetricsProvider() { + return stubSettings.getMetricsProvider(); + } + /** * Returns the underlying settings for making RPC calls. The settings should be changed with * care. diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java index ec15c4131a..f0aa852338 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java @@ -15,6 +15,11 @@ */ package com.google.cloud.bigtable.data.v2.stub; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APP_PROFILE_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY; + import com.google.api.core.ApiFunction; import com.google.api.core.BetaApi; import com.google.api.core.InternalApi; @@ -68,6 +73,7 @@ import com.google.bigtable.v2.RowRange; import com.google.bigtable.v2.SampleRowKeysResponse; import com.google.cloud.bigtable.Version; +import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.internal.JwtCredentialsWithAudience; import com.google.cloud.bigtable.data.v2.internal.NameUtil; import com.google.cloud.bigtable.data.v2.internal.RequestContext; @@ -97,8 +103,12 @@ import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracerUnaryCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTracerFactory; import com.google.cloud.bigtable.data.v2.stub.metrics.CompositeTracerFactory; +import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider; +import com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider; import com.google.cloud.bigtable.data.v2.stub.metrics.ErrorCountPerConnectionMetricTracker; +import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider; import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsTracerFactory; +import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider; import com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants; import com.google.cloud.bigtable.data.v2.stub.metrics.StatsHeadersServerStreamingCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.StatsHeadersUnaryCallable; @@ -130,6 +140,8 @@ import io.opencensus.tags.TagValue; import io.opencensus.tags.Tagger; import io.opencensus.tags.Tags; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -185,10 +197,17 @@ public class EnhancedBigtableStub implements AutoCloseable { public static EnhancedBigtableStub create(EnhancedBigtableStubSettings settings) throws IOException { - settings = 
settings.toBuilder().setTracerFactory(createBigtableTracerFactory(settings)).build(); ClientContext clientContext = createClientContext(settings); + OpenTelemetry openTelemetry = + getOpenTelemetry( + settings.getProjectId(), settings.getMetricsProvider(), clientContext.getCredentials()); + ClientContext contextWithTracer = + clientContext + .toBuilder() + .setTracerFactory(createBigtableTracerFactory(settings, openTelemetry)) + .build(); - return new EnhancedBigtableStub(settings, clientContext); + return new EnhancedBigtableStub(settings, contextWithTracer); } public static EnhancedBigtableStub createWithClientContext( @@ -207,15 +226,26 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set // workaround JWT audience issues patchCredentials(builder); + // Fix the credentials so that they can be shared + Credentials credentials = null; + if (builder.getCredentialsProvider() != null) { + credentials = builder.getCredentialsProvider().getCredentials(); + } + builder.setCredentialsProvider(FixedCredentialsProvider.create(credentials)); + InstantiatingGrpcChannelProvider.Builder transportProvider = builder.getTransportChannelProvider() instanceof InstantiatingGrpcChannelProvider ? ((InstantiatingGrpcChannelProvider) builder.getTransportChannelProvider()).toBuilder() : null; + OpenTelemetry openTelemetry = + getOpenTelemetry(settings.getProjectId(), settings.getMetricsProvider(), credentials); ErrorCountPerConnectionMetricTracker errorCountPerConnectionMetricTracker; - if (transportProvider != null) { + // Skip setting up ErrorCountPerConnectionMetricTracker if openTelemetry is null + if (openTelemetry != null && transportProvider != null) { errorCountPerConnectionMetricTracker = - new ErrorCountPerConnectionMetricTracker(createBuiltinAttributes(builder)); + new ErrorCountPerConnectionMetricTracker( + openTelemetry, createBuiltinAttributes(settings)); ApiFunction oldChannelConfigurator = transportProvider.getChannelConfigurator(); transportProvider.setChannelConfigurator( @@ -237,12 +267,6 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set // Inject channel priming if (settings.isRefreshingChannel()) { - // Fix the credentials so that they can be shared - Credentials credentials = null; - if (builder.getCredentialsProvider() != null) { - credentials = builder.getCredentialsProvider().getCredentials(); - } - builder.setCredentialsProvider(FixedCredentialsProvider.create(credentials)); if (transportProvider != null) { transportProvider.setChannelPrimer( @@ -267,13 +291,18 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set } public static ApiTracerFactory createBigtableTracerFactory( - EnhancedBigtableStubSettings settings) { - return createBigtableTracerFactory(settings, Tags.getTagger(), Stats.getStatsRecorder()); + EnhancedBigtableStubSettings settings, OpenTelemetry openTelemetry) throws IOException { + return createBigtableTracerFactory( + settings, Tags.getTagger(), Stats.getStatsRecorder(), openTelemetry); } @VisibleForTesting public static ApiTracerFactory createBigtableTracerFactory( - EnhancedBigtableStubSettings settings, Tagger tagger, StatsRecorder stats) { + EnhancedBigtableStubSettings settings, + Tagger tagger, + StatsRecorder stats, + OpenTelemetry openTelemetry) + throws IOException { String projectId = settings.getProjectId(); String instanceId = settings.getInstanceId(); String appProfileId = settings.getAppProfileId(); @@ -284,10 +313,10 @@ public static ApiTracerFactory 
createBigtableTracerFactory( .put(RpcMeasureConstants.BIGTABLE_INSTANCE_ID, TagValue.create(instanceId)) .put(RpcMeasureConstants.BIGTABLE_APP_PROFILE_ID, TagValue.create(appProfileId)) .build(); - ImmutableMap builtinAttributes = createBuiltinAttributes(settings.toBuilder()); - return new CompositeTracerFactory( - ImmutableList.of( + ImmutableList.Builder tracerFactories = ImmutableList.builder(); + tracerFactories + .add( // Add OpenCensus Tracing new OpencensusTracerFactory( ImmutableMap.builder() @@ -299,22 +328,52 @@ public static ApiTracerFactory createBigtableTracerFactory( .put("gax", GaxGrpcProperties.getGaxGrpcVersion()) .put("grpc", GaxGrpcProperties.getGrpcVersion()) .put("gapic", Version.VERSION) - .build()), - // Add OpenCensus Metrics - MetricsTracerFactory.create(tagger, stats, attributes), - BuiltinMetricsTracerFactory.create(builtinAttributes), - // Add user configured tracer - settings.getTracerFactory())); + .build())) + // Add OpenCensus Metrics + .add(MetricsTracerFactory.create(tagger, stats, attributes)) + // Add user configured tracer + .add(settings.getTracerFactory()); + BuiltinMetricsTracerFactory builtinMetricsTracerFactory = + openTelemetry != null + ? BuiltinMetricsTracerFactory.create(openTelemetry, createBuiltinAttributes(settings)) + : null; + if (builtinMetricsTracerFactory != null) { + tracerFactories.add(builtinMetricsTracerFactory); + } + return new CompositeTracerFactory(tracerFactories.build()); + } + + @Nullable + public static OpenTelemetry getOpenTelemetry( + String projectId, MetricsProvider metricsProvider, @Nullable Credentials defaultCredentials) + throws IOException { + if (metricsProvider instanceof CustomOpenTelemetryMetricsProvider) { + CustomOpenTelemetryMetricsProvider customMetricsProvider = + (CustomOpenTelemetryMetricsProvider) metricsProvider; + return customMetricsProvider.getOpenTelemetry(); + } else if (metricsProvider instanceof DefaultMetricsProvider) { + Credentials credentials = + BigtableDataSettings.getMetricsCredentials() != null + ? 
BigtableDataSettings.getMetricsCredentials() + : defaultCredentials; + DefaultMetricsProvider defaultMetricsProvider = (DefaultMetricsProvider) metricsProvider; + return defaultMetricsProvider.getOpenTelemetry(projectId, credentials); + } else if (metricsProvider instanceof NoopMetricsProvider) { + return null; + } + throw new IOException("Invalid MetricsProvider type " + metricsProvider); } - private static ImmutableMap createBuiltinAttributes( - EnhancedBigtableStubSettings.Builder builder) { - return ImmutableMap.builder() - .put("project_id", builder.getProjectId()) - .put("instance", builder.getInstanceId()) - .put("app_profile", builder.getAppProfileId()) - .put("client_name", "bigtable-java/" + Version.VERSION) - .build(); + private static Attributes createBuiltinAttributes(EnhancedBigtableStubSettings settings) { + return Attributes.of( + BIGTABLE_PROJECT_ID_KEY, + settings.getProjectId(), + INSTANCE_ID_KEY, + settings.getInstanceId(), + APP_PROFILE_KEY, + settings.getAppProfileId(), + CLIENT_NAME_KEY, + "bigtable-java/" + Version.VERSION); } private static void patchCredentials(EnhancedBigtableStubSettings.Builder settings) diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java index 9a5027c740..f07a8fb7fc 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java @@ -44,6 +44,8 @@ import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider; +import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider; import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor; import com.google.cloud.bigtable.data.v2.stub.readrows.ReadRowsBatchingDescriptor; import com.google.common.base.MoreObjects; @@ -229,6 +231,8 @@ public class EnhancedBigtableStubSettings extends StubSettings getJwtAudienceMapping() { return jwtAudienceMapping; } + public MetricsProvider getMetricsProvider() { + return metricsProvider; + } + /** * Gets if routing cookie is enabled. If true, client will retry a request with extra metadata * server sent back. @@ -636,6 +645,8 @@ public static class Builder extends StubSettings.Builder jwtAudienceMapping) { return this; } + /** + * Sets the {@link MetricsProvider}. + * + *
By default, this is set to {@link + * com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider#INSTANCE} which will + * collect and export client side metrics. + * + *
To disable client side metrics, set it to {@link + * com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider#INSTANCE}. + * + *
To use a custom OpenTelemetry instance, refer to {@link + * com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider} on how to + * set it up. + */ + public Builder setMetricsProvider(MetricsProvider metricsProvider) { + this.metricsProvider = Preconditions.checkNotNull(metricsProvider); + return this; + } + + /** Gets the {@link MetricsProvider}. */ + public MetricsProvider getMetricsProvider() { + return this.metricsProvider; + } + @InternalApi("Used for internal testing") public Map getJwtAudienceMapping() { return jwtAudienceMapping; @@ -1028,6 +1067,11 @@ public EnhancedBigtableStubSettings build() { featureFlags.setRoutingCookie(this.getEnableRoutingCookie()); featureFlags.setRetryInfo(this.getEnableRetryInfo()); + // client_Side_metrics_enabled feature flag is only set when a user is running with a + // DefaultMetricsProvider. This may cause false negatives when a user registered the + // metrics on their CustomOpenTelemetryMetricsProvider. + featureFlags.setClientSideMetricsEnabled( + this.getMetricsProvider() instanceof DefaultMetricsProvider); // Serialize the web64 encode the bigtable feature flags ByteArrayOutputStream boas = new ByteArrayOutputStream(); @@ -1080,6 +1124,7 @@ public String toString() { generateInitialChangeStreamPartitionsSettings) .add("readChangeStreamSettings", readChangeStreamSettings) .add("pingAndWarmSettings", pingAndWarmSettings) + .add("metricsProvider", metricsProvider) .add("parent", super.toString()) .toString(); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java index 6208fce89e..97cc2f73ec 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java @@ -86,7 +86,7 @@ public void call( stopwatch.stop(); if (context.getTracer() instanceof BigtableTracer) { ((BigtableTracer) context.getTracer()) - .batchRequestThrottled(stopwatch.elapsed(TimeUnit.MILLISECONDS)); + .batchRequestThrottled(stopwatch.elapsed(TimeUnit.NANOSECONDS)); } RateLimitingResponseObserver innerObserver = new RateLimitingResponseObserver(limiter, lastQpsChangeTime, responseObserver); diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java new file mode 100644 index 0000000000..d3f88b88c2 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java @@ -0,0 +1,356 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.FIRST_RESPONSE_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME; + +import com.google.api.MonitoredResource; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.core.InternalApi; +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.auth.Credentials; +import com.google.cloud.monitoring.v3.MetricServiceClient; +import com.google.cloud.monitoring.v3.MetricServiceSettings; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.monitoring.v3.CreateTimeSeriesRequest; +import com.google.monitoring.v3.ProjectName; +import com.google.monitoring.v3.TimeSeries; +import com.google.protobuf.Empty; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import org.threeten.bp.Duration; + +/** + * Bigtable Cloud Monitoring OpenTelemetry Exporter. + * + *
The exporter will look for all bigtable owned metrics under bigtable.googleapis.com + * instrumentation scope and upload it via the Google Cloud Monitoring API. + */ +@InternalApi +public final class BigtableCloudMonitoringExporter implements MetricExporter { + + private static final Logger logger = + Logger.getLogger(BigtableCloudMonitoringExporter.class.getName()); + + // This system property can be used to override the monitoring endpoint + // to a different environment. It's meant for internal testing only. + private static final String MONITORING_ENDPOINT = + MoreObjects.firstNonNull( + System.getProperty("bigtable.test-monitoring-endpoint"), + MetricServiceSettings.getDefaultEndpoint()); + + private static String APPLICATION_RESOURCE_PROJECT_ID = "project_id"; + + private final MetricServiceClient client; + + private final String bigtableProjectId; + private final String taskId; + + // The resource the client application is running on + private final MonitoredResource applicationResource; + + private final AtomicBoolean isShutdown = new AtomicBoolean(false); + + private CompletableResultCode lastExportCode; + + private static final ImmutableList BIGTABLE_TABLE_METRICS = + ImmutableSet.of( + OPERATION_LATENCIES_NAME, + ATTEMPT_LATENCIES_NAME, + SERVER_LATENCIES_NAME, + FIRST_RESPONSE_LATENCIES_NAME, + CLIENT_BLOCKING_LATENCIES_NAME, + APPLICATION_BLOCKING_LATENCIES_NAME, + RETRY_COUNT_NAME, + CONNECTIVITY_ERROR_COUNT_NAME) + .stream() + .map(m -> METER_NAME + m) + .collect(ImmutableList.toImmutableList()); + + private static final ImmutableList APPLICATION_METRICS = + ImmutableSet.of(PER_CONNECTION_ERROR_COUNT_NAME).stream() + .map(m -> METER_NAME + m) + .collect(ImmutableList.toImmutableList()); + + public static BigtableCloudMonitoringExporter create( + String projectId, @Nullable Credentials credentials) throws IOException { + MetricServiceSettings.Builder settingsBuilder = MetricServiceSettings.newBuilder(); + CredentialsProvider credentialsProvider = + Optional.ofNullable(credentials) + .map(FixedCredentialsProvider::create) + .orElse(NoCredentialsProvider.create()); + settingsBuilder.setCredentialsProvider(credentialsProvider); + settingsBuilder.setEndpoint(MONITORING_ENDPOINT); + + org.threeten.bp.Duration timeout = Duration.ofMinutes(1); + // TODO: createServiceTimeSeries needs special handling if the request failed. Leaving + // it as not retried for now. + settingsBuilder.createServiceTimeSeriesSettings().setSimpleTimeoutNoRetries(timeout); + + // Detect the resource that the client application is running on. For example, + // this could be a GCE instance or a GKE pod. Currently, we only support GCE instance and + // GKE pod. This method will return null for everything else. 
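    // When no supported platform is detected, detectResource() returns null and
    // exportApplicationResourceMetrics() short-circuits, so application-level metrics
    // such as per_connection_error_count are simply not exported.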
+ MonitoredResource applicationResource = BigtableExporterUtils.detectResource(); + + return new BigtableCloudMonitoringExporter( + projectId, + MetricServiceClient.create(settingsBuilder.build()), + applicationResource, + BigtableExporterUtils.getDefaultTaskValue()); + } + + @VisibleForTesting + BigtableCloudMonitoringExporter( + String projectId, + MetricServiceClient client, + @Nullable MonitoredResource applicationResource, + String taskId) { + this.client = client; + this.taskId = taskId; + this.applicationResource = applicationResource; + this.bigtableProjectId = projectId; + } + + @Override + public CompletableResultCode export(Collection collection) { + if (isShutdown.get()) { + logger.log(Level.WARNING, "Exporter is shutting down"); + return CompletableResultCode.ofFailure(); + } + + CompletableResultCode bigtableExportCode = exportBigtableResourceMetrics(collection); + CompletableResultCode applicationExportCode = exportApplicationResourceMetrics(collection); + + lastExportCode = + CompletableResultCode.ofAll(ImmutableList.of(applicationExportCode, bigtableExportCode)); + + return lastExportCode; + } + + /** Export metrics associated with a BigtableTable resource. */ + private CompletableResultCode exportBigtableResourceMetrics(Collection collection) { + // Filter bigtable table metrics + List bigtableMetricData = + collection.stream() + .filter(md -> BIGTABLE_TABLE_METRICS.contains(md.getName())) + .collect(Collectors.toList()); + + // Skips exporting if there's none + if (bigtableMetricData.isEmpty()) { + return CompletableResultCode.ofSuccess(); + } + + // Verifies metrics project id are the same as the bigtable project id set on this client + if (!bigtableMetricData.stream() + .flatMap(metricData -> metricData.getData().getPoints().stream()) + .allMatch(pd -> bigtableProjectId.equals(BigtableExporterUtils.getProjectId(pd)))) { + logger.log(Level.WARNING, "Metric data has different a projectId. Skip exporting."); + return CompletableResultCode.ofFailure(); + } + + List bigtableTimeSeries; + try { + bigtableTimeSeries = + BigtableExporterUtils.convertToBigtableTimeSeries(bigtableMetricData, taskId); + } catch (Throwable e) { + logger.log( + Level.WARNING, + "Failed to convert bigtable table metric data to cloud monitoring timeseries.", + e); + return CompletableResultCode.ofFailure(); + } + + ProjectName projectName = ProjectName.of(bigtableProjectId); + CreateTimeSeriesRequest bigtableRequest = + CreateTimeSeriesRequest.newBuilder() + .setName(projectName.toString()) + .addAllTimeSeries(bigtableTimeSeries) + .build(); + + ApiFuture future = + this.client.createServiceTimeSeriesCallable().futureCall(bigtableRequest); + + CompletableResultCode bigtableExportCode = new CompletableResultCode(); + ApiFutures.addCallback( + future, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable throwable) { + logger.log( + Level.WARNING, + "createServiceTimeSeries request failed for bigtable metrics. ", + throwable); + bigtableExportCode.fail(); + } + + @Override + public void onSuccess(Empty empty) { + bigtableExportCode.succeed(); + } + }, + MoreExecutors.directExecutor()); + + return bigtableExportCode; + } + + /** Export metrics associated with the resource the Application is running on. 
*/ + private CompletableResultCode exportApplicationResourceMetrics( + Collection collection) { + if (applicationResource == null) { + return CompletableResultCode.ofSuccess(); + } + + // Filter application level metrics + List metricData = + collection.stream() + .filter(md -> APPLICATION_METRICS.contains(md.getName())) + .collect(Collectors.toList()); + + // Skip exporting if there's none + if (metricData.isEmpty()) { + return CompletableResultCode.ofSuccess(); + } + + List timeSeries; + try { + timeSeries = + BigtableExporterUtils.convertToApplicationResourceTimeSeries( + metricData, taskId, applicationResource); + } catch (Throwable e) { + logger.log( + Level.WARNING, + "Failed to convert application metric data to cloud monitoring timeseries.", + e); + return CompletableResultCode.ofFailure(); + } + + // Construct the request. The project id will be the project id of the detected monitored + // resource. + ApiFuture gceOrGkeFuture; + CompletableResultCode exportCode = new CompletableResultCode(); + try { + ProjectName projectName = + ProjectName.of(applicationResource.getLabelsOrThrow(APPLICATION_RESOURCE_PROJECT_ID)); + CreateTimeSeriesRequest request = + CreateTimeSeriesRequest.newBuilder() + .setName(projectName.toString()) + .addAllTimeSeries(timeSeries) + .build(); + + gceOrGkeFuture = this.client.createServiceTimeSeriesCallable().futureCall(request); + + ApiFutures.addCallback( + gceOrGkeFuture, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable throwable) { + logger.log( + Level.WARNING, + "createServiceTimeSeries request failed for per connection error metrics.", + throwable); + exportCode.fail(); + } + + @Override + public void onSuccess(Empty empty) { + exportCode.succeed(); + } + }, + MoreExecutors.directExecutor()); + + } catch (Exception e) { + logger.log( + Level.WARNING, + "Failed to get projectName for application resource " + applicationResource); + return CompletableResultCode.ofFailure(); + } + + return exportCode; + } + + @Override + public CompletableResultCode flush() { + if (lastExportCode != null) { + return lastExportCode; + } + return CompletableResultCode.ofSuccess(); + } + + @Override + public CompletableResultCode shutdown() { + if (!isShutdown.compareAndSet(false, true)) { + logger.log(Level.WARNING, "shutdown is called multiple times"); + return CompletableResultCode.ofSuccess(); + } + CompletableResultCode flushResult = flush(); + CompletableResultCode shutdownResult = new CompletableResultCode(); + flushResult.whenComplete( + () -> { + Throwable throwable = null; + try { + client.shutdown(); + } catch (Throwable e) { + logger.log(Level.WARNING, "failed to shutdown the monitoring client", e); + throwable = e; + } + if (throwable != null) { + shutdownResult.fail(); + } else { + shutdownResult.succeed(); + } + }); + return CompletableResultCode.ofAll(Arrays.asList(flushResult, shutdownResult)); + } + + /** + * For Google Cloud Monitoring always return CUMULATIVE to keep track of the cumulative value of a + * metric over time. 
+ */ + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return AggregationTemporality.CUMULATIVE; + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java new file mode 100644 index 0000000000..9a4d928ce4 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java @@ -0,0 +1,347 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import static com.google.api.Distribution.BucketOptions; +import static com.google.api.Distribution.BucketOptions.Explicit; +import static com.google.api.MetricDescriptor.MetricKind; +import static com.google.api.MetricDescriptor.MetricKind.CUMULATIVE; +import static com.google.api.MetricDescriptor.MetricKind.GAUGE; +import static com.google.api.MetricDescriptor.MetricKind.UNRECOGNIZED; +import static com.google.api.MetricDescriptor.ValueType; +import static com.google.api.MetricDescriptor.ValueType.DISTRIBUTION; +import static com.google.api.MetricDescriptor.ValueType.DOUBLE; +import static com.google.api.MetricDescriptor.ValueType.INT64; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_UID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY; + +import com.google.api.Distribution; +import com.google.api.Metric; +import com.google.api.MonitoredResource; +import com.google.cloud.opentelemetry.detection.AttributeKeys; +import com.google.cloud.opentelemetry.detection.DetectedPlatform; +import com.google.cloud.opentelemetry.detection.GCPPlatformDetector; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableSet; +import com.google.monitoring.v3.Point; +import com.google.monitoring.v3.TimeInterval; +import com.google.monitoring.v3.TimeSeries; +import com.google.monitoring.v3.TypedValue; +import com.google.protobuf.util.Timestamps; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.DoublePointData; +import 
io.opentelemetry.sdk.metrics.data.HistogramData; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.data.MetricDataType; +import io.opentelemetry.sdk.metrics.data.PointData; +import io.opentelemetry.sdk.metrics.data.SumData; +import java.lang.management.ManagementFactory; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +/** Utils to convert OpenTelemetry types to Google Cloud Monitoring types. */ +class BigtableExporterUtils { + + private static final Logger logger = Logger.getLogger(BigtableExporterUtils.class.getName()); + + private static final String BIGTABLE_RESOURCE_TYPE = "bigtable_client_raw"; + + // These metric labels will be promoted to the bigtable_table monitored resource fields + private static final Set> BIGTABLE_PROMOTED_RESOURCE_LABELS = + ImmutableSet.of( + BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, TABLE_ID_KEY, CLUSTER_ID_KEY, ZONE_ID_KEY); + + private BigtableExporterUtils() {} + + /** + * In most cases this should look like java-${UUID}@${hostname}. The hostname will be retrieved + * from the jvm name and fallback to the local hostname. + */ + static String getDefaultTaskValue() { + // Something like '@' + final String jvmName = ManagementFactory.getRuntimeMXBean().getName(); + // If jvm doesn't have the expected format, fallback to the local hostname + if (jvmName.indexOf('@') < 1) { + String hostname = "localhost"; + try { + hostname = InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + logger.log(Level.INFO, "Unable to get the hostname.", e); + } + // Generate a random number and use the same format "random_number@hostname". 
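      // Illustrative result: "java-<random-uuid>@<hostname>", falling back to
      // "java-<random-uuid>@localhost" if the hostname lookup also fails.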
+ return "java-" + UUID.randomUUID() + "@" + hostname; + } + return "java-" + UUID.randomUUID() + jvmName; + } + + static String getProjectId(PointData pointData) { + return pointData.getAttributes().get(BIGTABLE_PROJECT_ID_KEY); + } + + static List convertToBigtableTimeSeries(List collection, String taskId) { + List allTimeSeries = new ArrayList<>(); + + for (MetricData metricData : collection) { + if (!metricData.getInstrumentationScopeInfo().getName().equals(METER_NAME)) { + // Filter out metric data for instruments that are not part of the bigtable builtin metrics + continue; + } + metricData.getData().getPoints().stream() + .map(pointData -> convertPointToBigtableTimeSeries(metricData, pointData, taskId)) + .forEach(allTimeSeries::add); + } + + return allTimeSeries; + } + + static List convertToApplicationResourceTimeSeries( + Collection collection, String taskId, MonitoredResource applicationResource) { + Preconditions.checkNotNull( + applicationResource, + "convert application metrics is called when the supported resource is not detected"); + List allTimeSeries = new ArrayList<>(); + for (MetricData metricData : collection) { + if (!metricData.getInstrumentationScopeInfo().getName().equals(METER_NAME)) { + // Filter out metric data for instruments that are not part of the bigtable builtin metrics + continue; + } + metricData.getData().getPoints().stream() + .map( + pointData -> + convertPointToApplicationResourceTimeSeries( + metricData, pointData, taskId, applicationResource)) + .forEach(allTimeSeries::add); + } + return allTimeSeries; + } + + @Nullable + static MonitoredResource detectResource() { + GCPPlatformDetector detector = GCPPlatformDetector.DEFAULT_INSTANCE; + DetectedPlatform detectedPlatform = detector.detectPlatform(); + switch (detectedPlatform.getSupportedPlatform()) { + case GOOGLE_COMPUTE_ENGINE: + return createGceMonitoredResource( + detectedPlatform.getProjectId(), detectedPlatform.getAttributes()); + case GOOGLE_KUBERNETES_ENGINE: + return createGkeMonitoredResource( + detectedPlatform.getProjectId(), detectedPlatform.getAttributes()); + default: + return null; + } + } + + private static MonitoredResource createGceMonitoredResource( + String projectId, Map attributes) { + return MonitoredResource.newBuilder() + .setType("gce_instance") + .putLabels("project_id", projectId) + .putLabels("instance_id", attributes.get(AttributeKeys.GCE_INSTANCE_ID)) + .putLabels("zone", attributes.get(AttributeKeys.GCE_AVAILABILITY_ZONE)) + .build(); + } + + private static MonitoredResource createGkeMonitoredResource( + String projectId, Map attributes) { + return MonitoredResource.newBuilder() + .setType("k8s_container") + .putLabels("project_id", projectId) + .putLabels("location", attributes.get(AttributeKeys.GKE_CLUSTER_LOCATION)) + .putLabels("cluster_name", attributes.get(AttributeKeys.GKE_CLUSTER_NAME)) + .putLabels("namespace_name", MoreObjects.firstNonNull(System.getenv("NAMESPACE"), "")) + .putLabels("pod_name", MoreObjects.firstNonNull(System.getenv("HOSTNAME"), "")) + .putLabels("container_name", MoreObjects.firstNonNull(System.getenv("CONTAINER_NAME"), "")) + .build(); + } + + private static TimeSeries convertPointToBigtableTimeSeries( + MetricData metricData, PointData pointData, String taskId) { + TimeSeries.Builder builder = + TimeSeries.newBuilder() + .setMetricKind(convertMetricKind(metricData)) + .setValueType(convertValueType(metricData.getType())); + Metric.Builder metricBuilder = Metric.newBuilder().setType(metricData.getName()); + + Attributes attributes = 
pointData.getAttributes(); + MonitoredResource.Builder monitoredResourceBuilder = + MonitoredResource.newBuilder().setType(BIGTABLE_RESOURCE_TYPE); + + for (AttributeKey key : attributes.asMap().keySet()) { + if (BIGTABLE_PROMOTED_RESOURCE_LABELS.contains(key)) { + monitoredResourceBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key))); + } else { + metricBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key))); + } + } + + builder.setResource(monitoredResourceBuilder.build()); + + metricBuilder.putLabels(CLIENT_UID_KEY.getKey(), taskId); + builder.setMetric(metricBuilder.build()); + + TimeInterval timeInterval = + TimeInterval.newBuilder() + .setStartTime(Timestamps.fromNanos(pointData.getStartEpochNanos())) + .setEndTime(Timestamps.fromNanos(pointData.getEpochNanos())) + .build(); + + builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval)); + + return builder.build(); + } + + private static TimeSeries convertPointToApplicationResourceTimeSeries( + MetricData metricData, + PointData pointData, + String taskId, + MonitoredResource applicationResource) { + TimeSeries.Builder builder = + TimeSeries.newBuilder() + .setMetricKind(convertMetricKind(metricData)) + .setValueType(convertValueType(metricData.getType())) + .setResource(applicationResource); + + Metric.Builder metricBuilder = Metric.newBuilder().setType(metricData.getName()); + + Attributes attributes = pointData.getAttributes(); + for (AttributeKey key : attributes.asMap().keySet()) { + metricBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key))); + } + + metricBuilder.putLabels(CLIENT_UID_KEY.getKey(), taskId); + builder.setMetric(metricBuilder.build()); + + TimeInterval timeInterval = + TimeInterval.newBuilder() + .setStartTime(Timestamps.fromNanos(pointData.getStartEpochNanos())) + .setEndTime(Timestamps.fromNanos(pointData.getEpochNanos())) + .build(); + + builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval)); + return builder.build(); + } + + private static MetricKind convertMetricKind(MetricData metricData) { + switch (metricData.getType()) { + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return convertHistogramType(metricData.getHistogramData()); + case LONG_GAUGE: + case DOUBLE_GAUGE: + return GAUGE; + case LONG_SUM: + return convertSumDataType(metricData.getLongSumData()); + case DOUBLE_SUM: + return convertSumDataType(metricData.getDoubleSumData()); + default: + return UNRECOGNIZED; + } + } + + private static MetricKind convertHistogramType(HistogramData histogramData) { + if (histogramData.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) { + return CUMULATIVE; + } + return UNRECOGNIZED; + } + + private static MetricKind convertSumDataType(SumData sum) { + if (!sum.isMonotonic()) { + return GAUGE; + } + if (sum.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) { + return CUMULATIVE; + } + return UNRECOGNIZED; + } + + private static ValueType convertValueType(MetricDataType metricDataType) { + switch (metricDataType) { + case LONG_GAUGE: + case LONG_SUM: + return INT64; + case DOUBLE_GAUGE: + case DOUBLE_SUM: + return DOUBLE; + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return DISTRIBUTION; + default: + return ValueType.UNRECOGNIZED; + } + } + + private static Point createPoint( + MetricDataType type, PointData pointData, TimeInterval timeInterval) { + Point.Builder builder = Point.newBuilder().setInterval(timeInterval); + switch (type) { + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return 
builder + .setValue( + TypedValue.newBuilder() + .setDistributionValue(convertHistogramData((HistogramPointData) pointData)) + .build()) + .build(); + case DOUBLE_GAUGE: + case DOUBLE_SUM: + return builder + .setValue( + TypedValue.newBuilder() + .setDoubleValue(((DoublePointData) pointData).getValue()) + .build()) + .build(); + case LONG_GAUGE: + case LONG_SUM: + return builder + .setValue(TypedValue.newBuilder().setInt64Value(((LongPointData) pointData).getValue())) + .build(); + default: + logger.log(Level.WARNING, "unsupported metric type"); + return builder.build(); + } + } + + private static Distribution convertHistogramData(HistogramPointData pointData) { + return Distribution.newBuilder() + .setCount(pointData.getCount()) + .setMean(pointData.getCount() == 0L ? 0.0D : pointData.getSum() / pointData.getCount()) + .setBucketOptions( + BucketOptions.newBuilder() + .setExplicitBuckets(Explicit.newBuilder().addAllBounds(pointData.getBoundaries()))) + .addAllBucketCounts(pointData.getCounts()) + .build(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java index 1cda49934c..3b2242385a 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java @@ -42,7 +42,7 @@ public void streamCreated(Attributes transportAttrs, Metadata headers) { @Override public void outboundMessageSent(int seqNo, long optionalWireSize, long optionalUncompressedSize) { - tracer.grpcChannelQueuedLatencies(stopwatch.elapsed(TimeUnit.MILLISECONDS)); + tracer.grpcChannelQueuedLatencies(stopwatch.elapsed(TimeUnit.NANOSECONDS)); } static class Factory extends ClientStreamTracer.Factory { diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java new file mode 100644 index 0000000000..d85300828b --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java @@ -0,0 +1,220 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.api.core.InternalApi; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.sdk.metrics.Aggregation; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.View; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +/** Defining Bigtable builit-in metrics scope, attributes, metric names and views. */ +@InternalApi +public class BuiltinMetricsConstants { + + // Metric attribute keys for monitored resource + public static final AttributeKey BIGTABLE_PROJECT_ID_KEY = + AttributeKey.stringKey("project_id"); + public static final AttributeKey INSTANCE_ID_KEY = AttributeKey.stringKey("instance"); + public static final AttributeKey TABLE_ID_KEY = AttributeKey.stringKey("table"); + public static final AttributeKey CLUSTER_ID_KEY = AttributeKey.stringKey("cluster"); + public static final AttributeKey ZONE_ID_KEY = AttributeKey.stringKey("zone"); + + // Metric attribute keys for labels + // We need to access APP_PROFILE_KEY in EnhancedBigtableStubSettings and STREAMING_KEY in + // IT tests, so they're public. + public static final AttributeKey APP_PROFILE_KEY = AttributeKey.stringKey("app_profile"); + public static final AttributeKey STREAMING_KEY = AttributeKey.booleanKey("streaming"); + public static final AttributeKey CLIENT_NAME_KEY = AttributeKey.stringKey("client_name"); + static final AttributeKey METHOD_KEY = AttributeKey.stringKey("method"); + static final AttributeKey STATUS_KEY = AttributeKey.stringKey("status"); + static final AttributeKey CLIENT_UID_KEY = AttributeKey.stringKey("client_uid"); + + // Metric names + public static final String OPERATION_LATENCIES_NAME = "operation_latencies"; + public static final String ATTEMPT_LATENCIES_NAME = "attempt_latencies"; + static final String RETRY_COUNT_NAME = "retry_count"; + static final String CONNECTIVITY_ERROR_COUNT_NAME = "connectivity_error_count"; + static final String SERVER_LATENCIES_NAME = "server_latencies"; + static final String FIRST_RESPONSE_LATENCIES_NAME = "first_response_latencies"; + static final String APPLICATION_BLOCKING_LATENCIES_NAME = "application_latencies"; + static final String CLIENT_BLOCKING_LATENCIES_NAME = "throttling_latencies"; + static final String PER_CONNECTION_ERROR_COUNT_NAME = "per_connection_error_count"; + + // Buckets under 100,000 are identical to buckets for server side metrics handler_latencies. + // Extending client side bucket to up to 3,200,000. 
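  // These boundaries are in milliseconds and are applied through the Views built in
  // getAllViews(); the histogram instruments themselves are created without explicit
  // buckets in BuiltinMetricsTracerFactory.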
+ private static final Aggregation AGGREGATION_WITH_MILLIS_HISTOGRAM = + Aggregation.explicitBucketHistogram( + ImmutableList.of( + 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0, 30.0, 40.0, + 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, 250.0, 300.0, 400.0, 500.0, 650.0, + 800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, 50000.0, 100000.0, 200000.0, + 400000.0, 800000.0, 1600000.0, 3200000.0)); // max is 53.3 minutes + + private static final Aggregation AGGREGATION_PER_CONNECTION_ERROR_COUNT_HISTOGRAM = + Aggregation.explicitBucketHistogram( + ImmutableList.of( + 1.0, + 2.0, + 4.0, + 8.0, + 16.0, + 32.0, + 64.0, + 125.0, + 250.0, + 500.0, + 1_000.0, + 2_000.0, + 4_000.0, + 8_000.0, + 16_000.0, + 32_000.0, + 64_000.0, + 128_000.0, + 250_000.0, + 500_000.0, + 1_000_000.0)); + + public static final String METER_NAME = "bigtable.googleapis.com/internal/client/"; + + static final Set COMMON_ATTRIBUTES = + ImmutableSet.of( + BIGTABLE_PROJECT_ID_KEY, + INSTANCE_ID_KEY, + TABLE_ID_KEY, + APP_PROFILE_KEY, + CLUSTER_ID_KEY, + ZONE_ID_KEY, + METHOD_KEY, + CLIENT_NAME_KEY); + + static void defineView( + ImmutableMap.Builder viewMap, + String id, + Aggregation aggregation, + InstrumentType type, + String unit, + Set attributes) { + InstrumentSelector selector = + InstrumentSelector.builder() + .setName(id) + .setMeterName(METER_NAME) + .setType(type) + .setUnit(unit) + .build(); + Set attributesFilter = + ImmutableSet.builder() + .addAll( + COMMON_ATTRIBUTES.stream().map(AttributeKey::getKey).collect(Collectors.toSet())) + .addAll(attributes.stream().map(AttributeKey::getKey).collect(Collectors.toSet())) + .build(); + View view = + View.builder() + .setName(METER_NAME + id) + .setAggregation(aggregation) + .setAttributeFilter(attributesFilter) + .build(); + + viewMap.put(selector, view); + } + + public static Map getAllViews() { + ImmutableMap.Builder views = ImmutableMap.builder(); + + defineView( + views, + OPERATION_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder() + .addAll(COMMON_ATTRIBUTES) + .add(STREAMING_KEY, STATUS_KEY) + .build()); + defineView( + views, + ATTEMPT_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder() + .addAll(COMMON_ATTRIBUTES) + .add(STREAMING_KEY, STATUS_KEY) + .build()); + defineView( + views, + SERVER_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); + defineView( + views, + FIRST_RESPONSE_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); + defineView( + views, + APPLICATION_BLOCKING_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).build()); + defineView( + views, + CLIENT_BLOCKING_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).build()); + defineView( + views, + RETRY_COUNT_NAME, + Aggregation.sum(), + InstrumentType.COUNTER, + "1", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); + defineView( + views, + CONNECTIVITY_ERROR_COUNT_NAME, + Aggregation.sum(), + InstrumentType.COUNTER, + "1", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); + + defineView( + views, 
+ PER_CONNECTION_ERROR_COUNT_NAME, + AGGREGATION_PER_CONNECTION_ERROR_COUNT_HISTOGRAM, + InstrumentType.HISTOGRAM, + "1", + ImmutableSet.builder() + .add(BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, APP_PROFILE_KEY, CLIENT_NAME_KEY) + .build()); + + return views.build(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java index 2d8262a93e..abd214d760 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java @@ -16,13 +16,22 @@ package com.google.cloud.bigtable.data.v2.stub.metrics; import static com.google.api.gax.tracing.ApiTracerFactory.OperationType; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METHOD_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STATUS_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STREAMING_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY; import com.google.api.gax.retrying.ServerStreamingAttemptException; import com.google.api.gax.tracing.SpanName; -import com.google.cloud.bigtable.stats.StatsRecorderWrapper; -import com.google.common.annotations.VisibleForTesting; +import com.google.cloud.bigtable.Version; import com.google.common.base.Stopwatch; import com.google.common.math.IntMath; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.LongCounter; import java.util.concurrent.CancellationException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -37,8 +46,7 @@ */ class BuiltinMetricsTracer extends BigtableTracer { - private final StatsRecorderWrapper recorder; - + private static final String NAME = "java-bigtable/" + Version.VERSION; private final OperationType operationType; private final SpanName spanName; @@ -64,21 +72,56 @@ class BuiltinMetricsTracer extends BigtableTracer { private boolean flowControlIsDisabled = false; - private AtomicInteger requestLeft = new AtomicInteger(0); + private final AtomicInteger requestLeft = new AtomicInteger(0); // Monitored resource labels private String tableId = "unspecified"; private String zone = "global"; private String cluster = "unspecified"; - private AtomicLong totalClientBlockingTime = new AtomicLong(0); + private final AtomicLong totalClientBlockingTime = new AtomicLong(0); + + private final Attributes baseAttributes; + + private Long serverLatencies = null; + + // OpenCensus (and server) histogram buckets use [start, end), however OpenTelemetry uses (start, + // end]. To work around this, we measure all the latencies in nanoseconds and convert them + // to milliseconds and use DoubleHistogram. This should minimize the chance of a data + // point fall on the bucket boundary that causes off by one errors. 
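  // For example, an attempt that takes exactly 10 ms would sit on the 10.0 bucket boundary,
  // where the [start, end) and (start, end] conventions disagree; measuring 10_000_123 ns
  // and recording 10.000123 ms keeps the value strictly inside a bucket.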
+ private final DoubleHistogram operationLatenciesHistogram; + private final DoubleHistogram attemptLatenciesHistogram; + private final DoubleHistogram serverLatenciesHistogram; + private final DoubleHistogram firstResponseLatenciesHistogram; + private final DoubleHistogram clientBlockingLatenciesHistogram; + private final DoubleHistogram applicationBlockingLatenciesHistogram; + private final LongCounter connectivityErrorCounter; + private final LongCounter retryCounter; - @VisibleForTesting BuiltinMetricsTracer( - OperationType operationType, SpanName spanName, StatsRecorderWrapper recorder) { + OperationType operationType, + SpanName spanName, + Attributes attributes, + DoubleHistogram operationLatenciesHistogram, + DoubleHistogram attemptLatenciesHistogram, + DoubleHistogram serverLatenciesHistogram, + DoubleHistogram firstResponseLatenciesHistogram, + DoubleHistogram clientBlockingLatenciesHistogram, + DoubleHistogram applicationBlockingLatenciesHistogram, + LongCounter connectivityErrorCounter, + LongCounter retryCounter) { this.operationType = operationType; this.spanName = spanName; - this.recorder = recorder; + this.baseAttributes = attributes; + + this.operationLatenciesHistogram = operationLatenciesHistogram; + this.attemptLatenciesHistogram = attemptLatenciesHistogram; + this.serverLatenciesHistogram = serverLatenciesHistogram; + this.firstResponseLatenciesHistogram = firstResponseLatenciesHistogram; + this.clientBlockingLatenciesHistogram = clientBlockingLatenciesHistogram; + this.applicationBlockingLatenciesHistogram = applicationBlockingLatenciesHistogram; + this.connectivityErrorCounter = connectivityErrorCounter; + this.retryCounter = retryCounter; } @Override @@ -203,13 +246,8 @@ public int getAttempt() { @Override public void recordGfeMetadata(@Nullable Long latency, @Nullable Throwable throwable) { - // Record the metrics and put in the map after the attempt is done, so we can have cluster and - // zone information if (latency != null) { - recorder.putGfeLatencies(latency); - recorder.putGfeMissingHeaders(0); - } else { - recorder.putGfeMissingHeaders(1); + serverLatencies = latency; } } @@ -220,13 +258,13 @@ public void setLocations(String zone, String cluster) { } @Override - public void batchRequestThrottled(long throttledTimeMs) { - totalClientBlockingTime.addAndGet(throttledTimeMs); + public void batchRequestThrottled(long throttledTimeNanos) { + totalClientBlockingTime.addAndGet(Duration.ofNanos(throttledTimeNanos).toMillis()); } @Override - public void grpcChannelQueuedLatencies(long queuedTimeMs) { - totalClientBlockingTime.addAndGet(queuedTimeMs); + public void grpcChannelQueuedLatencies(long queuedTimeNanos) { + totalClientBlockingTime.addAndGet(queuedTimeNanos); } @Override @@ -239,26 +277,43 @@ private void recordOperationCompletion(@Nullable Throwable status) { return; } operationTimer.stop(); - long operationLatency = operationTimer.elapsed(TimeUnit.MILLISECONDS); + + boolean isStreaming = operationType == OperationType.ServerStreaming; + String statusStr = Util.extractStatus(status); + + // Publish metric data with all the attributes. The attributes get filtered in + // BuiltinMetricsConstants when we construct the views. 
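    // For instance, the operation_latencies view keeps STREAMING_KEY and STATUS_KEY, while
    // the application_latencies view filters them out and only keeps the common resource
    // and method attributes.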
+ Attributes attributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, tableId) + .put(CLUSTER_ID_KEY, cluster) + .put(ZONE_ID_KEY, zone) + .put(METHOD_KEY, spanName.toString()) + .put(CLIENT_NAME_KEY, NAME) + .put(STREAMING_KEY, isStreaming) + .put(STATUS_KEY, statusStr) + .build(); + long operationLatencyNano = operationTimer.elapsed(TimeUnit.NANOSECONDS); // Only record when retry count is greater than 0 so the retry // graph will be less confusing if (attemptCount > 1) { - recorder.putRetryCount(attemptCount - 1); + retryCounter.add(attemptCount - 1, attributes); } + operationLatenciesHistogram.record(convertToMs(operationLatencyNano), attributes); + // serverLatencyTimer should already be stopped in recordAttemptCompletion - recorder.putOperationLatencies(operationLatency); - recorder.putApplicationLatencies( - Duration.ofNanos(operationLatencyNano - totalServerLatencyNano.get()).toMillis()); + long applicationLatencyNano = operationLatencyNano - totalServerLatencyNano.get(); + applicationBlockingLatenciesHistogram.record(convertToMs(applicationLatencyNano), attributes); if (operationType == OperationType.ServerStreaming && spanName.getMethodName().equals("ReadRows")) { - recorder.putFirstResponseLatencies(firstResponsePerOpTimer.elapsed(TimeUnit.MILLISECONDS)); + firstResponseLatenciesHistogram.record( + convertToMs(firstResponsePerOpTimer.elapsed(TimeUnit.NANOSECONDS)), attributes); } - - recorder.recordOperation(Util.extractStatus(status), tableId, zone, cluster); } private void recordAttemptCompletion(@Nullable Throwable status) { @@ -273,8 +328,7 @@ private void recordAttemptCompletion(@Nullable Throwable status) { } } - // Make sure to reset the blocking time after recording it for the next attempt - recorder.putClientBlockingLatencies(totalClientBlockingTime.getAndSet(0)); + boolean isStreaming = operationType == OperationType.ServerStreaming; // Patch the status until it's fixed in gax. When an attempt failed, // it'll throw a ServerStreamingAttemptException. 
Unwrap the exception @@ -283,7 +337,35 @@ private void recordAttemptCompletion(@Nullable Throwable status) { status = status.getCause(); } - recorder.putAttemptLatencies(attemptTimer.elapsed(TimeUnit.MILLISECONDS)); - recorder.recordAttempt(Util.extractStatus(status), tableId, zone, cluster); + String statusStr = Util.extractStatus(status); + + Attributes attributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, tableId) + .put(CLUSTER_ID_KEY, cluster) + .put(ZONE_ID_KEY, zone) + .put(METHOD_KEY, spanName.toString()) + .put(CLIENT_NAME_KEY, NAME) + .put(STREAMING_KEY, isStreaming) + .put(STATUS_KEY, statusStr) + .build(); + + clientBlockingLatenciesHistogram.record(convertToMs(totalClientBlockingTime.get()), attributes); + + attemptLatenciesHistogram.record( + convertToMs(attemptTimer.elapsed(TimeUnit.NANOSECONDS)), attributes); + + if (serverLatencies != null) { + serverLatenciesHistogram.record(serverLatencies, attributes); + connectivityErrorCounter.add(0, attributes); + } else { + connectivityErrorCounter.add(1, attributes); + } + } + + private static double convertToMs(long nanoSeconds) { + double toMs = 1e-6; + return nanoSeconds * toMs; } } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java index 794997071d..f0ac656978 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java @@ -15,29 +15,112 @@ */ package com.google.cloud.bigtable.data.v2.stub.metrics; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.FIRST_RESPONSE_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME; + import com.google.api.core.InternalApi; import com.google.api.gax.tracing.ApiTracer; import com.google.api.gax.tracing.ApiTracerFactory; import com.google.api.gax.tracing.BaseApiTracerFactory; import com.google.api.gax.tracing.SpanName; -import com.google.cloud.bigtable.stats.StatsWrapper; -import com.google.common.collect.ImmutableMap; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.Meter; +import java.io.IOException; /** - * {@link ApiTracerFactory} that will generate OpenCensus metrics by using the {@link ApiTracer} + * {@link ApiTracerFactory} that will generate 
OpenTelemetry metrics by using the {@link ApiTracer} * api. */ @InternalApi("For internal use only") public class BuiltinMetricsTracerFactory extends BaseApiTracerFactory { - private final ImmutableMap statsAttributes; + private final Attributes attributes; + + private static final String MILLISECOND = "ms"; + private static final String COUNT = "1"; - public static BuiltinMetricsTracerFactory create(ImmutableMap statsAttributes) { - return new BuiltinMetricsTracerFactory(statsAttributes); + private final DoubleHistogram operationLatenciesHistogram; + private final DoubleHistogram attemptLatenciesHistogram; + private final DoubleHistogram serverLatenciesHistogram; + private final DoubleHistogram firstResponseLatenciesHistogram; + private final DoubleHistogram clientBlockingLatenciesHistogram; + private final DoubleHistogram applicationBlockingLatenciesHistogram; + private final LongCounter connectivityErrorCounter; + private final LongCounter retryCounter; + + public static BuiltinMetricsTracerFactory create( + OpenTelemetry openTelemetry, Attributes attributes) throws IOException { + return new BuiltinMetricsTracerFactory(openTelemetry, attributes); } - private BuiltinMetricsTracerFactory(ImmutableMap statsAttributes) { - this.statsAttributes = statsAttributes; + BuiltinMetricsTracerFactory(OpenTelemetry openTelemetry, Attributes attributes) { + this.attributes = attributes; + Meter meter = openTelemetry.getMeter(METER_NAME); + + operationLatenciesHistogram = + meter + .histogramBuilder(OPERATION_LATENCIES_NAME) + .setDescription( + "Total time until final operation success or failure, including retries and backoff.") + .setUnit(MILLISECOND) + .build(); + attemptLatenciesHistogram = + meter + .histogramBuilder(ATTEMPT_LATENCIES_NAME) + .setDescription("Client observed latency per RPC attempt.") + .setUnit(MILLISECOND) + .build(); + serverLatenciesHistogram = + meter + .histogramBuilder(SERVER_LATENCIES_NAME) + .setDescription( + "The latency measured from the moment that the RPC entered the Google data center until the RPC was completed.") + .setUnit(MILLISECOND) + .build(); + firstResponseLatenciesHistogram = + meter + .histogramBuilder(FIRST_RESPONSE_LATENCIES_NAME) + .setDescription( + "Latency from operation start until the response headers were received. The publishing of the measurement will be delayed until the attempt response has been received.") + .setUnit(MILLISECOND) + .build(); + clientBlockingLatenciesHistogram = + meter + .histogramBuilder(CLIENT_BLOCKING_LATENCIES_NAME) + .setDescription( + "The artificial latency introduced by the client to limit the number of outstanding requests. The publishing of the measurement will be delayed until the attempt trailers have been received.") + .setUnit(MILLISECOND) + .build(); + applicationBlockingLatenciesHistogram = + meter + .histogramBuilder(APPLICATION_BLOCKING_LATENCIES_NAME) + .setDescription( + "The latency of the client application consuming available response data.") + .setUnit(MILLISECOND) + .build(); + connectivityErrorCounter = + meter + .counterBuilder(CONNECTIVITY_ERROR_COUNT_NAME) + .setDescription( + "Number of requests that failed to reach the Google datacenter. 
(Requests without google response headers") + .setUnit(COUNT) + .build(); + retryCounter = + meter + .counterBuilder(RETRY_COUNT_NAME) + .setDescription("The number of additional RPCs sent after the initial attempt.") + .setUnit(COUNT) + .build(); } @Override @@ -45,6 +128,14 @@ public ApiTracer newTracer(ApiTracer parent, SpanName spanName, OperationType op return new BuiltinMetricsTracer( operationType, spanName, - StatsWrapper.createRecorder(operationType, spanName, statsAttributes)); + attributes, + operationLatenciesHistogram, + attemptLatenciesHistogram, + serverLatenciesHistogram, + firstResponseLatenciesHistogram, + clientBlockingLatenciesHistogram, + applicationBlockingLatenciesHistogram, + connectivityErrorCounter, + retryCounter); } } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java new file mode 100644 index 0000000000..445160a146 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java @@ -0,0 +1,59 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.auth.Credentials; +import com.google.auth.oauth2.GoogleCredentials; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.View; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import java.io.IOException; +import java.util.Map; +import javax.annotation.Nullable; + +/** + * A util class to register built-in metrics on a custom OpenTelemetry instance. This is for + * advanced usage, and is only necessary when wanting to write built-in metrics to cloud monitoring + * and custom sinks. Please refer to {@link CustomOpenTelemetryMetricsProvider} for example usage. + */ +public class BuiltinMetricsView { + + private BuiltinMetricsView() {} + + /** + * Register built-in metrics on the {@link SdkMeterProviderBuilder} with application default + * credentials. + */ + public static void registerBuiltinMetrics(String projectId, SdkMeterProviderBuilder builder) + throws IOException { + BuiltinMetricsView.registerBuiltinMetrics( + projectId, GoogleCredentials.getApplicationDefault(), builder); + } + + /** Register built-in metrics on the {@link SdkMeterProviderBuilder} with credentials. 
*/ + public static void registerBuiltinMetrics( + String projectId, @Nullable Credentials credentials, SdkMeterProviderBuilder builder) + throws IOException { + MetricExporter metricExporter = BigtableCloudMonitoringExporter.create(projectId, credentials); + for (Map.Entry entry : + BuiltinMetricsConstants.getAllViews().entrySet()) { + builder.registerView(entry.getKey(), entry.getValue()); + } + builder.registerMetricReader(PeriodicMetricReader.create(metricExporter)); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java new file mode 100644 index 0000000000..ba3034559d --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java @@ -0,0 +1,70 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.common.base.MoreObjects; +import io.opentelemetry.api.OpenTelemetry; + +/** + * Set a custom OpenTelemetry instance. + * + *

To register client side metrics on the custom OpenTelemetry: + * + *

{@code
+ * SdkMeterProviderBuilder sdkMeterProvider = SdkMeterProvider.builder();
+ *
+ * // register built-in metrics on your meter provider with default credentials
+ * BuiltinMetricsView.registerBuiltinMetrics("project-id", sdkMeterProvider);
+ *
+ * // register other metric readers and views
+ * sdkMeterProvider.registerMetricReader(..);
+ * sdkMeterProvider.registerView(..);
+ *
+ * // create the OTEL instance
+ * OpenTelemetry openTelemetry = OpenTelemetrySdk
+ *     .builder()
+ *     .setMeterProvider(sdkMeterProvider.build())
+ *     .build();
+ *
+ * // Override MetricsProvider in BigtableDataSettings
+ * BigtableDataSettings settings = BigtableDataSettings.newBuilder()
+ *   .setProjectId("my-project")
+ *   .setInstanceId("my-instance-id")
+ *   .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry))
+ *   .build();
+ * }
+ */ +public final class CustomOpenTelemetryMetricsProvider implements MetricsProvider { + + private final OpenTelemetry otel; + + public static CustomOpenTelemetryMetricsProvider create(OpenTelemetry otel) { + return new CustomOpenTelemetryMetricsProvider(otel); + } + + private CustomOpenTelemetryMetricsProvider(OpenTelemetry otel) { + this.otel = otel; + } + + public OpenTelemetry getOpenTelemetry() { + return otel; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("openTelemetry", otel).toString(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java new file mode 100644 index 0000000000..b8aad8c931 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java @@ -0,0 +1,63 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.api.core.InternalApi; +import com.google.auth.Credentials; +import com.google.common.base.MoreObjects; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import java.io.IOException; +import javax.annotation.Nullable; + +/** + * Set {@link + * com.google.cloud.bigtable.data.v2.BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)}, + * to {@link this#INSTANCE} to enable collecting and export client side metrics + * https://cloud.google.com/bigtable/docs/client-side-metrics. This is the default setting in {@link + * com.google.cloud.bigtable.data.v2.BigtableDataSettings}. 
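With DefaultMetricsProvider, CustomOpenTelemetryMetricsProvider, and NoopMetricsProvider in place, the choice of metrics backend becomes a single setter on the data settings. A small sketch of the opt-out path, assuming placeholder project and instance ids; built-in metrics stay enabled by default when no provider is set:

import com.google.cloud.bigtable.data.v2.BigtableDataClient;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider;
import java.io.IOException;

final class MetricsProviderChoiceSketch {
  // Built-in metrics are exported by default; NoopMetricsProvider disables them entirely.
  static BigtableDataClient createClientWithoutClientSideMetrics() throws IOException {
    BigtableDataSettings settings =
        BigtableDataSettings.newBuilder()
            .setProjectId("my-project") // placeholder
            .setInstanceId("my-instance") // placeholder
            .setMetricsProvider(NoopMetricsProvider.INSTANCE)
            .build();
    return BigtableDataClient.create(settings);
  }
}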
+ */ +public final class DefaultMetricsProvider implements MetricsProvider { + + public static DefaultMetricsProvider INSTANCE = new DefaultMetricsProvider(); + + private OpenTelemetry openTelemetry; + private String projectId; + + private DefaultMetricsProvider() {} + + @InternalApi + public OpenTelemetry getOpenTelemetry(String projectId, @Nullable Credentials credentials) + throws IOException { + this.projectId = projectId; + if (openTelemetry == null) { + SdkMeterProviderBuilder meterProvider = SdkMeterProvider.builder(); + BuiltinMetricsView.registerBuiltinMetrics(projectId, credentials, meterProvider); + openTelemetry = OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + } + return openTelemetry; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("projectId", projectId) + .add("openTelemetry", openTelemetry) + .toString(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java index cab3b0bbd0..a891df9509 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java @@ -15,12 +15,15 @@ */ package com.google.cloud.bigtable.data.v2.stub.metrics; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME; + import com.google.api.core.InternalApi; -import com.google.cloud.bigtable.stats.StatsRecorderWrapperForConnection; -import com.google.cloud.bigtable.stats.StatsWrapper; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableMap; import io.grpc.ClientInterceptor; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongHistogram; +import io.opentelemetry.api.metrics.Meter; import java.util.Collections; import java.util.Set; import java.util.WeakHashMap; @@ -30,24 +33,30 @@ /* Background task that goes through all connections and updates the errors_per_connection metric. */ @InternalApi("For internal use only") public class ErrorCountPerConnectionMetricTracker implements Runnable { + private static final Integer PER_CONNECTION_ERROR_COUNT_PERIOD_SECONDS = 60; + + private final LongHistogram perConnectionErrorCountHistogram; + private final Attributes attributes; + private final Set connectionErrorCountInterceptors; private final Object interceptorsLock = new Object(); - // This is not final so that it can be updated and mocked during testing. 
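The reworked ErrorCountPerConnectionMetricTracker (continued below) records each channel's error count for the last period into a long-valued histogram on a fixed schedule. A rough standalone sketch of that shape, assuming a placeholder meter and metric name and a single counter instead of the per-interceptor bookkeeping:

import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.api.common.Attributes;
import io.opentelemetry.api.metrics.LongHistogram;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

final class PerConnectionErrorSketch implements Runnable {
  private final LongHistogram perConnectionErrorCount;
  private final Attributes attributes;
  private final AtomicLong errorsSinceLastFlush = new AtomicLong();

  PerConnectionErrorSketch(OpenTelemetry openTelemetry, Attributes attributes) {
    // Placeholder meter and instrument names, for illustration only.
    perConnectionErrorCount =
        openTelemetry
            .getMeter("example-meter")
            .histogramBuilder("per_connection_error_count")
            .ofLongs()
            .setUnit("1")
            .build();
    this.attributes = attributes;
  }

  void onRequestFailed() {
    errorsSinceLastFlush.incrementAndGet();
  }

  @Override
  public void run() {
    // Flush the errors observed in the last period as one histogram sample, then reset.
    perConnectionErrorCount.record(errorsSinceLastFlush.getAndSet(0), attributes);
  }

  void start(ScheduledExecutorService scheduler) {
    scheduler.scheduleAtFixedRate(this, 60, 60, TimeUnit.SECONDS);
  }
}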
- private StatsRecorderWrapperForConnection statsRecorderWrapperForConnection; - @VisibleForTesting - void setStatsRecorderWrapperForConnection( - StatsRecorderWrapperForConnection statsRecorderWrapperForConnection) { - this.statsRecorderWrapperForConnection = statsRecorderWrapperForConnection; - } - - public ErrorCountPerConnectionMetricTracker(ImmutableMap builtinAttributes) { + public ErrorCountPerConnectionMetricTracker(OpenTelemetry openTelemetry, Attributes attributes) { connectionErrorCountInterceptors = Collections.synchronizedSet(Collections.newSetFromMap(new WeakHashMap<>())); - this.statsRecorderWrapperForConnection = - StatsWrapper.createRecorderForConnection(builtinAttributes); + Meter meter = openTelemetry.getMeter(METER_NAME); + + perConnectionErrorCountHistogram = + meter + .histogramBuilder(PER_CONNECTION_ERROR_COUNT_NAME) + .ofLongs() + .setDescription("Distribution of counts of channels per 'error count per minute'.") + .setUnit("1") + .build(); + + this.attributes = attributes; } public void startConnectionErrorCountTracker(ScheduledExecutorService scheduler) { @@ -75,7 +84,7 @@ public void run() { if (errors > 0 || successes > 0) { // TODO: add a metric to also keep track of the number of successful requests per each // connection. - statsRecorderWrapperForConnection.putAndRecordPerConnectionErrorCount(errors); + perConnectionErrorCountHistogram.record(errors, attributes); } } } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java new file mode 100644 index 0000000000..251bb41619 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java @@ -0,0 +1,25 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.api.core.InternalExtensionOnly; + +/** + * Provide client side metrics https://cloud.google.com/bigtable/docs/client-side-metrics + * implementations. + */ +@InternalExtensionOnly +public interface MetricsProvider {} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java new file mode 100644 index 0000000000..9a00ddb135 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java @@ -0,0 +1,36 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.common.base.MoreObjects; + +/** + * Set {@link + * com.google.cloud.bigtable.data.v2.BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)}, + * to {@link this#INSTANCE} to disable collecting and export client side metrics + * https://cloud.google.com/bigtable/docs/client-side-metrics. + */ +public final class NoopMetricsProvider implements MetricsProvider { + + public static NoopMetricsProvider INSTANCE = new NoopMetricsProvider(); + + private NoopMetricsProvider() {} + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).toString(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java index b7140f0156..ce73d75dc1 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java @@ -21,6 +21,7 @@ import com.google.api.gax.rpc.ApiCallContext; import com.google.api.gax.rpc.UnaryCallable; import com.google.api.gax.tracing.ApiTracer; +import org.threeten.bp.Duration; /** * This callable will extract total throttled time from {@link ApiCallContext} and add it to {@link @@ -42,7 +43,8 @@ public ApiFuture futureCall(RequestT request, ApiCallContext context) // this should always be true if (tracer instanceof BigtableTracer) { ((BigtableTracer) tracer) - .batchRequestThrottled(context.getOption(Batcher.THROTTLED_TIME_KEY)); + .batchRequestThrottled( + Duration.ofMillis(context.getOption(Batcher.THROTTLED_TIME_KEY)).toNanos()); } } return innerCallable.futureCall(request, context); diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java index a35112b380..fea66e82bf 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java @@ -36,6 +36,7 @@ import com.google.bigtable.v2.ReadRowsResponse; import com.google.cloud.bigtable.data.v2.internal.NameUtil; import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider; import com.google.common.base.Preconditions; import com.google.common.io.BaseEncoding; import io.grpc.Attributes; @@ -169,10 +170,13 @@ public void tearDown() { @Test public void testNewClientsShareTransportChannel() throws Exception { - // Create 3 lightweight clients - - try (BigtableDataClientFactory factory = BigtableDataClientFactory.create(defaultSettings); + try (BigtableDataClientFactory factory = + BigtableDataClientFactory.create( + defaultSettings + .toBuilder() + 
.setMetricsProvider(NoopMetricsProvider.INSTANCE) + .build()); BigtableDataClient ignored1 = factory.createForInstance("project1", "instance1"); BigtableDataClient ignored2 = factory.createForInstance("project2", "instance2"); BigtableDataClient ignored3 = factory.createForInstance("project3", "instance3")) { @@ -316,7 +320,7 @@ public void testFeatureFlags() throws Exception { @Test public void testBulkMutationFlowControllerConfigured() throws Exception { BigtableDataSettings settings = - BigtableDataSettings.newBuilder() + BigtableDataSettings.newBuilderForEmulator(server.getPort()) .setProjectId("my-project") .setInstanceId("my-instance") .setCredentialsProvider(credentialsProvider) diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java index 4e75fb8631..56181a20ab 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java @@ -15,34 +15,64 @@ */ package com.google.cloud.bigtable.data.v2.it; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getAggregatedValue; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getMetricData; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getStartTimeSeconds; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.verifyAttributes; +import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.Truth.assertWithMessage; import static com.google.common.truth.TruthJUnit.assume; import com.google.api.client.util.Lists; +import com.google.cloud.bigtable.admin.v2.BigtableInstanceAdminClient; import com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient; +import com.google.cloud.bigtable.admin.v2.models.AppProfile; +import com.google.cloud.bigtable.admin.v2.models.CreateAppProfileRequest; import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest; import com.google.cloud.bigtable.admin.v2.models.Table; +import com.google.cloud.bigtable.data.v2.BigtableDataClient; import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.models.Query; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView; +import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider; import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv; import com.google.cloud.bigtable.test_helpers.env.PrefixGenerator; import com.google.cloud.bigtable.test_helpers.env.TestEnvRule; import com.google.cloud.monitoring.v3.MetricServiceClient; import com.google.common.base.Stopwatch; +import com.google.common.collect.BoundType; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Range; import com.google.monitoring.v3.ListTimeSeriesRequest; import com.google.monitoring.v3.ListTimeSeriesResponse; +import com.google.monitoring.v3.Point; import com.google.monitoring.v3.ProjectName; import com.google.monitoring.v3.TimeInterval; +import com.google.monitoring.v3.TimeSeries; +import com.google.protobuf.Timestamp; import 
com.google.protobuf.util.Timestamps; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.logging.Level; import java.util.logging.Logger; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Before; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; @@ -50,6 +80,7 @@ import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import org.threeten.bp.Duration; +import org.threeten.bp.Instant; @RunWith(JUnit4.class) public class BuiltinMetricsIT { @@ -58,71 +89,131 @@ public class BuiltinMetricsIT { private static final Logger logger = Logger.getLogger(BuiltinMetricsIT.class.getName()); @Rule public Timeout globalTimeout = Timeout.seconds(900); - private static Table table; - private static BigtableTableAdminClient tableAdminClient; - private static MetricServiceClient metricClient; + + private Table tableCustomOtel; + private Table tableDefault; + private BigtableDataClient clientCustomOtel; + private BigtableDataClient clientDefault; + private BigtableTableAdminClient tableAdminClient; + private BigtableInstanceAdminClient instanceAdminClient; + private MetricServiceClient metricClient; + + private InMemoryMetricReader metricReader; + private String appProfileCustomOtel; + private String appProfileDefault; public static String[] VIEWS = { "operation_latencies", "attempt_latencies", "connectivity_error_count", - "application_blocking_latencies" + "application_blocking_latencies", }; - @BeforeClass - public static void setUpClass() throws IOException { + @Before + public void setup() throws IOException { + // This test tests 2 things. End-to-end test using the default OTEL instance created by the + // client, and also end-to-end test using a custom OTEL instance set by the customer. In + // both tests, a BigtableCloudMonitoringExporter is created to export data to Cloud Monitoring. 
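The verification strategy described in this comment pairs the Cloud Monitoring exporter with an InMemoryMetricReader so the exported time series can be cross-checked against locally collected MetricData. A condensed sketch of that wiring, assuming a placeholder project id (the full setup follows in the test):

import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView;
import io.opentelemetry.api.OpenTelemetry;
import io.opentelemetry.sdk.OpenTelemetrySdk;
import io.opentelemetry.sdk.metrics.SdkMeterProvider;
import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
import io.opentelemetry.sdk.metrics.data.MetricData;
import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
import java.io.IOException;
import java.util.Collection;

final class InMemoryVerificationSketch {
  static Collection<MetricData> collect(String projectId) throws IOException {
    InMemoryMetricReader reader = InMemoryMetricReader.create();
    SdkMeterProviderBuilder meterProvider = SdkMeterProvider.builder().registerMetricReader(reader);
    // Registers the built-in views and the Cloud Monitoring exporter on the same provider.
    BuiltinMetricsView.registerBuiltinMetrics(projectId, meterProvider);
    OpenTelemetry openTelemetry =
        OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
    // ... drive traffic with a client configured via
    // CustomOpenTelemetryMetricsProvider.create(openTelemetry) ...
    return reader.collectAllMetrics();
  }
}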
assume() .withMessage("Builtin metrics integration test is not supported by emulator") .that(testEnvRule.env()) .isNotInstanceOf(EmulatorEnv.class); - // Enable built in metrics - BigtableDataSettings.enableBuiltinMetrics(); - // Create a cloud monitoring client metricClient = MetricServiceClient.create(); tableAdminClient = testEnvRule.env().getTableAdminClient(); + instanceAdminClient = testEnvRule.env().getInstanceAdminClient(); + appProfileCustomOtel = PrefixGenerator.newPrefix("test1"); + appProfileDefault = PrefixGenerator.newPrefix("test2"); + instanceAdminClient.createAppProfile( + CreateAppProfileRequest.of(testEnvRule.env().getInstanceId(), appProfileCustomOtel) + .setRoutingPolicy( + AppProfile.SingleClusterRoutingPolicy.of(testEnvRule.env().getPrimaryClusterId())) + .setIsolationPolicy(AppProfile.StandardIsolationPolicy.of(AppProfile.Priority.LOW))); + instanceAdminClient.createAppProfile( + CreateAppProfileRequest.of(testEnvRule.env().getInstanceId(), appProfileDefault) + .setRoutingPolicy( + AppProfile.SingleClusterRoutingPolicy.of(testEnvRule.env().getPrimaryClusterId())) + .setIsolationPolicy(AppProfile.StandardIsolationPolicy.of(AppProfile.Priority.LOW))); + + // When using the custom OTEL instance, we can also register a InMemoryMetricReader on the + // SdkMeterProvider to verify the data exported on Cloud Monitoring with the in memory metric + // data collected in InMemoryMetricReader. + metricReader = InMemoryMetricReader.create(); + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider); + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + + BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder(); + + clientCustomOtel = + BigtableDataClient.create( + settings + .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry)) + .setAppProfileId(appProfileCustomOtel) + .build()); + clientDefault = BigtableDataClient.create(settings.setAppProfileId(appProfileDefault).build()); } - @AfterClass - public static void tearDown() { + @After + public void tearDown() { if (metricClient != null) { metricClient.close(); } - if (table != null) { - tableAdminClient.deleteTable(table.getId()); + if (tableCustomOtel != null) { + tableAdminClient.deleteTable(tableCustomOtel.getId()); + } + if (tableDefault != null) { + tableAdminClient.deleteTable(tableDefault.getId()); + } + if (instanceAdminClient != null) { + instanceAdminClient.deleteAppProfile( + testEnvRule.env().getInstanceId(), appProfileCustomOtel, true); + instanceAdminClient.deleteAppProfile( + testEnvRule.env().getInstanceId(), appProfileDefault, true); + } + if (clientCustomOtel != null) { + clientCustomOtel.close(); + } + if (clientDefault != null) { + clientDefault.close(); } } @Test - public void testBuiltinMetrics() throws Exception { - logger.info("Started testing builtin metrics"); - table = + public void testBuiltinMetricsWithDefaultOTEL() throws Exception { + logger.info("Started testing builtin metrics with default OTEL"); + tableDefault = tableAdminClient.createTable( - CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test")) + CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test1")) .addFamily("cf")); - logger.info("Create table: " + table.getId()); - // Send a MutateRow and ReadRows request - testEnvRule - .env() - 
.getDataClient() - .mutateRow(RowMutation.create(table.getId(), "a-new-key").setCell("cf", "q", "abc")); + logger.info("Create default table: " + tableDefault.getId()); + + Instant start = Instant.now().minus(Duration.ofSeconds(10)); + + // Send a MutateRow and ReadRows request and measure the latencies for these requests. + clientDefault.mutateRow( + RowMutation.create(tableDefault.getId(), "a-new-key").setCell("cf", "q", "abc")); ArrayList rows = - Lists.newArrayList( - testEnvRule.env().getDataClient().readRows(Query.create(table.getId()).limit(10))); + Lists.newArrayList(clientDefault.readRows(Query.create(tableDefault.getId()).limit(10))); - Stopwatch stopwatch = Stopwatch.createStarted(); + // This stopwatch is used for to limit fetching of metric data in verifyMetrics + Stopwatch metricsPollingStopwatch = Stopwatch.createStarted(); ProjectName name = ProjectName.of(testEnvRule.env().getProjectId()); - // Restrict time to last 10 minutes and 5 minutes after the request - long startMillis = System.currentTimeMillis() - Duration.ofMinutes(10).toMillis(); - long endMillis = startMillis + Duration.ofMinutes(15).toMillis(); + // Interval is set in the monarch request when query metric timestamps. + // Restrict it to before we send to request and 3 minute after we send the request. If + // it turns out to be still flaky we can increase the filter range. + Instant end = Instant.now().plus(Duration.ofMinutes(3)); TimeInterval interval = TimeInterval.newBuilder() - .setStartTime(Timestamps.fromMillis(startMillis)) - .setEndTime(Timestamps.fromMillis(endMillis)) + .setStartTime(Timestamps.fromMillis(start.toEpochMilli())) + .setEndTime(Timestamps.fromMillis(end.toEpochMilli())) .build(); for (String view : VIEWS) { @@ -132,42 +223,123 @@ public void testBuiltinMetrics() throws Exception { String.format( "metric.type=\"bigtable.googleapis.com/client/%s\" " + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.MutateRow\"" - + " AND resource.labels.table=\"%s\"", - view, testEnvRule.env().getInstanceId(), table.getId()); + + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"", + view, testEnvRule.env().getInstanceId(), tableDefault.getId(), appProfileDefault); ListTimeSeriesRequest.Builder requestBuilder = ListTimeSeriesRequest.newBuilder() .setName(name.toString()) .setFilter(metricFilter) .setInterval(interval) .setView(ListTimeSeriesRequest.TimeSeriesView.FULL); - - verifyMetricsArePublished(requestBuilder.build(), stopwatch, view); + verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view); // Verify that metrics are published for ReadRows request metricFilter = String.format( "metric.type=\"bigtable.googleapis.com/client/%s\" " + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.ReadRows\"" - + " AND resource.labels.table=\"%s\"", - view, testEnvRule.env().getInstanceId(), table.getId()); + + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"", + view, testEnvRule.env().getInstanceId(), tableDefault.getId(), appProfileDefault); + requestBuilder.setFilter(metricFilter); + + verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view); + } + } + + @Test + public void testBuiltinMetricsWithCustomOTEL() throws Exception { + logger.info("Started testing builtin metrics with custom OTEL"); + tableCustomOtel = + tableAdminClient.createTable( + CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test2")) + .addFamily("cf")); + logger.info("Create custom 
table: " + tableCustomOtel.getId()); + + Instant start = Instant.now().minus(Duration.ofSeconds(10)); + // Send a MutateRow and ReadRows request and measure the latencies for these requests. + clientCustomOtel.mutateRow( + RowMutation.create(tableCustomOtel.getId(), "a-new-key").setCell("cf", "q", "abc")); + ArrayList rows = + Lists.newArrayList( + clientCustomOtel.readRows(Query.create(tableCustomOtel.getId()).limit(10))); + + // This stopwatch is used for to limit fetching of metric data in verifyMetrics + Stopwatch metricsPollingStopwatch = Stopwatch.createStarted(); + + ProjectName name = ProjectName.of(testEnvRule.env().getProjectId()); + + Collection fromMetricReader = metricReader.collectAllMetrics(); + + // Interval is set in the monarch request when query metric timestamps. + // Restrict it to before we send to request and 3 minute after we send the request. If + // it turns out to be still flaky we can increase the filter range. + Instant end = start.plus(Duration.ofMinutes(3)); + TimeInterval interval = + TimeInterval.newBuilder() + .setStartTime(Timestamps.fromMillis(start.toEpochMilli())) + .setEndTime(Timestamps.fromMillis(end.toEpochMilli())) + .build(); + + for (String view : VIEWS) { + String otelMetricName = view; + if (view.equals("application_blocking_latencies")) { + otelMetricName = "application_latencies"; + } + MetricData dataFromReader = getMetricData(fromMetricReader, otelMetricName); + + // Filter on instance and method name + // Verify that metrics are correct for MutateRows request + String metricFilter = + String.format( + "metric.type=\"bigtable.googleapis.com/client/%s\" " + + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.MutateRow\"" + + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"", + view, + testEnvRule.env().getInstanceId(), + tableCustomOtel.getId(), + appProfileCustomOtel); + ListTimeSeriesRequest.Builder requestBuilder = + ListTimeSeriesRequest.newBuilder() + .setName(name.toString()) + .setFilter(metricFilter) + .setInterval(interval) + .setView(ListTimeSeriesRequest.TimeSeriesView.FULL); + + ListTimeSeriesResponse response = + verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view); + verifyMetricsWithMetricsReader(response, dataFromReader); + + // Verify that metrics are correct for ReadRows request + metricFilter = + String.format( + "metric.type=\"bigtable.googleapis.com/client/%s\" " + + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.ReadRows\"" + + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"", + view, + testEnvRule.env().getInstanceId(), + tableCustomOtel.getId(), + appProfileCustomOtel); requestBuilder.setFilter(metricFilter); - verifyMetricsArePublished(requestBuilder.build(), stopwatch, view); + response = verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view); + verifyMetricsWithMetricsReader(response, dataFromReader); } } - private void verifyMetricsArePublished( - ListTimeSeriesRequest request, Stopwatch stopwatch, String view) throws Exception { + private ListTimeSeriesResponse verifyMetricsArePublished( + ListTimeSeriesRequest request, Stopwatch metricsPollingStopwatch, String view) + throws Exception { ListTimeSeriesResponse response = metricClient.listTimeSeriesCallable().call(request); - logger.log( - Level.INFO, - "Checking for view " - + view - + ", has timeseries=" - + response.getTimeSeriesCount() - + " stopwatch elapsed " - + stopwatch.elapsed(TimeUnit.MINUTES)); - while 
(response.getTimeSeriesCount() == 0 && stopwatch.elapsed(TimeUnit.MINUTES) < 10) { + while (response.getTimeSeriesCount() == 0 + && metricsPollingStopwatch.elapsed(TimeUnit.MINUTES) < 10) { + logger.log( + Level.INFO, + "Checking for view " + + view + + ", has timeseries=" + + response.getTimeSeriesCount() + + " stopwatch elapsed " + + metricsPollingStopwatch.elapsed(TimeUnit.MINUTES)); // Call listTimeSeries every minute Thread.sleep(Duration.ofMinutes(1).toMillis()); response = metricClient.listTimeSeriesCallable().call(request); @@ -176,5 +348,64 @@ private void verifyMetricsArePublished( assertWithMessage("View " + view + " didn't return any data.") .that(response.getTimeSeriesCount()) .isGreaterThan(0); + + return response; + } + + private void verifyMetricsWithMetricsReader( + ListTimeSeriesResponse response, MetricData dataFromReader) { + for (TimeSeries ts : response.getTimeSeriesList()) { + Map attributesMap = + ImmutableMap.builder() + .putAll(ts.getResource().getLabelsMap()) + .putAll(ts.getMetric().getLabelsMap()) + .build(); + AttributesBuilder attributesBuilder = Attributes.builder(); + String streamingKey = BuiltinMetricsConstants.STREAMING_KEY.getKey(); + attributesMap.forEach( + (k, v) -> { + if (!k.equals(streamingKey)) { + attributesBuilder.put(k, v); + } + }); + if (attributesMap.containsKey(streamingKey)) { + attributesBuilder.put(streamingKey, Boolean.parseBoolean(attributesMap.get(streamingKey))); + } + Attributes attributes = attributesBuilder.build(); + verifyAttributes(dataFromReader, attributes); + long expectedValue = getAggregatedValue(dataFromReader, attributes); + Timestamp startTime = getStartTimeSeconds(dataFromReader, attributes); + assertThat(startTime.getSeconds()).isGreaterThan(0); + List point = + ts.getPointsList().stream() + .filter( + p -> + Timestamps.compare(p.getInterval().getStartTime(), startTime) >= 0 + && Timestamps.compare( + p.getInterval().getStartTime(), + Timestamps.add( + startTime, + com.google.protobuf.Duration.newBuilder() + .setSeconds(60) + .build())) + < 0) + .collect(Collectors.toList()); + if (point.size() > 0) { + long actualValue = (long) point.get(0).getValue().getDistributionValue().getMean(); + assertWithMessage( + "actual value does not match expected value, actual value " + + actualValue + + " expected value " + + expectedValue + + " actual start time " + + point.get(0).getInterval().getStartTime() + + " expected start time " + + startTime) + .that(actualValue) + .isIn( + Range.range( + expectedValue - 1, BoundType.CLOSED, expectedValue + 1, BoundType.CLOSED)); + } + } } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java new file mode 100644 index 0000000000..56f6bfa476 --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.it; + +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants; +import com.google.common.truth.Correspondence; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.data.PointData; + +public class MetricsITUtils { + + static final Correspondence METRIC_DATA_NAME_CONTAINS = + Correspondence.from((md, s) -> md.getName().contains(s), "contains name"); + + static final Correspondence POINT_DATA_CLUSTER_ID_CONTAINS = + Correspondence.from( + (pd, s) -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY).contains(s), + "contains attributes"); + + static final Correspondence POINT_DATA_ZONE_ID_CONTAINS = + Correspondence.from( + (pd, s) -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY).contains(s), + "contains attributes"); +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java index b0e12d5ade..84ab24f1c8 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java @@ -15,37 +15,76 @@ */ package com.google.cloud.bigtable.data.v2.it; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.METRIC_DATA_NAME_CONTAINS; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_CLUSTER_ID_CONTAINS; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_ZONE_ID_CONTAINS; import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.TruthJUnit.assume; import com.google.api.core.ApiFuture; import com.google.api.gax.rpc.NotFoundException; import com.google.cloud.bigtable.admin.v2.models.Cluster; +import com.google.cloud.bigtable.data.v2.BigtableDataClient; +import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.models.Query; import com.google.cloud.bigtable.data.v2.models.Row; -import com.google.cloud.bigtable.stats.BuiltinViews; -import com.google.cloud.bigtable.stats.StatsWrapper; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView; +import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider; import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv; import com.google.cloud.bigtable.test_helpers.env.TestEnvRule; import com.google.common.collect.Lists; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.data.PointData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; +import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; -import org.junit.BeforeClass; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; public 
class StreamingMetricsMetadataIT { @ClassRule public static TestEnvRule testEnvRule = new TestEnvRule(); - @BeforeClass - public static void setUpClass() { + private BigtableDataClient client; + private InMemoryMetricReader metricReader; + + @Before + public void setup() throws IOException { assume() .withMessage("StreamingMetricsMetadataIT is not supported on Emulator") .that(testEnvRule.env()) .isNotInstanceOf(EmulatorEnv.class); - BuiltinViews.registerBigtableBuiltinViews(); + + BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder(); + + metricReader = InMemoryMetricReader.create(); + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider); + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + + settings.setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry)); + client = BigtableDataClient.create(settings.build()); + } + + @After + public void tearDown() throws IOException { + if (client != null) { + client.close(); + } } @Test @@ -54,7 +93,7 @@ public void testSuccess() throws Exception { String uniqueKey = prefix + "-read"; Query query = Query.create(testEnvRule.env().getTableId()).rowKey(uniqueKey); - ArrayList rows = Lists.newArrayList(testEnvRule.env().getDataClient().readRows(query)); + ArrayList rows = Lists.newArrayList(client.readRows(query)); ApiFuture> clustersFuture = testEnvRule @@ -64,27 +103,73 @@ public void testSuccess() throws Exception { List clusters = clustersFuture.get(1, TimeUnit.MINUTES); - // give opencensus some time to populate view data - Thread.sleep(100); + Collection allMetricData = metricReader.collectAllMetrics(); + List metrics = + metricReader.collectAllMetrics().stream() + .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME)) + .collect(Collectors.toList()); + + assertThat(allMetricData) + .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS) + .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME); + assertThat(metrics).hasSize(1); - List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings(); - assertThat(tagValueStrings).contains(clusters.get(0).getZone()); - assertThat(tagValueStrings).contains(clusters.get(0).getId()); + MetricData metricData = metrics.get(0); + List pointData = new ArrayList<>(metricData.getData().getPoints()); + List clusterAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY)) + .collect(Collectors.toList()); + List zoneAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY)) + .collect(Collectors.toList()); + + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS) + .contains(clusters.get(0).getId()); + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS) + .contains(clusters.get(0).getZone()); + assertThat(clusterAttributes).contains(clusters.get(0).getId()); + assertThat(zoneAttributes).contains(clusters.get(0).getZone()); } @Test - public void testFailure() throws InterruptedException { + public void testFailure() { Query query = Query.create("non-exist-table"); try { - Lists.newArrayList(testEnvRule.env().getDataClient().readRows(query)); + Lists.newArrayList(client.readRows(query)); } catch (NotFoundException e) { } - // give opencensus some time to populate 
view data - Thread.sleep(100); + Collection allMetricData = metricReader.collectAllMetrics(); + List metrics = + metricReader.collectAllMetrics().stream() + .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME)) + .collect(Collectors.toList()); + + assertThat(allMetricData) + .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS) + .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME); + assertThat(metrics).hasSize(1); + + MetricData metricData = metrics.get(0); + List pointData = new ArrayList<>(metricData.getData().getPoints()); + List clusterAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY)) + .collect(Collectors.toList()); + List zoneAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY)) + .collect(Collectors.toList()); - List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings(); - assertThat(tagValueStrings).contains("unspecified"); - assertThat(tagValueStrings).contains("global"); + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS) + .contains("unspecified"); + assertThat(pointData).comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS).contains("global"); + assertThat(clusterAttributes).contains("unspecified"); + assertThat(zoneAttributes).contains("global"); } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java index aa2a4317fc..ad5f71db8f 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java @@ -15,35 +15,76 @@ */ package com.google.cloud.bigtable.data.v2.it; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.METRIC_DATA_NAME_CONTAINS; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_CLUSTER_ID_CONTAINS; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_ZONE_ID_CONTAINS; import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.TruthJUnit.assume; import com.google.api.core.ApiFuture; import com.google.api.gax.rpc.NotFoundException; import com.google.cloud.bigtable.admin.v2.models.Cluster; +import com.google.cloud.bigtable.data.v2.BigtableDataClient; +import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.models.RowMutation; -import com.google.cloud.bigtable.stats.BuiltinViews; -import com.google.cloud.bigtable.stats.StatsWrapper; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView; +import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider; import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv; import com.google.cloud.bigtable.test_helpers.env.TestEnvRule; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.data.PointData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; +import java.io.IOException; +import 
java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.UUID; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import org.junit.BeforeClass; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; public class UnaryMetricsMetadataIT { @ClassRule public static TestEnvRule testEnvRule = new TestEnvRule(); - @BeforeClass - public static void setUpClass() { + private BigtableDataClient client; + private InMemoryMetricReader metricReader; + + @Before + public void setup() throws IOException { assume() .withMessage("UnaryMetricsMetadataIT is not supported on Emulator") .that(testEnvRule.env()) .isNotInstanceOf(EmulatorEnv.class); - BuiltinViews.registerBigtableBuiltinViews(); + + BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder(); + + metricReader = InMemoryMetricReader.create(); + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider); + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + + settings.setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry)); + + client = BigtableDataClient.create(settings.build()); + } + + @After + public void tearDown() throws IOException { + if (client != null) { + client.close(); + } } @Test @@ -52,9 +93,7 @@ public void testSuccess() throws Exception { String familyId = testEnvRule.env().getFamilyId(); ApiFuture future = - testEnvRule - .env() - .getDataClient() + client .mutateRowCallable() .futureCall( RowMutation.create(testEnvRule.env().getTableId(), rowKey) @@ -69,18 +108,36 @@ public void testSuccess() throws Exception { .listClustersAsync(testEnvRule.env().getInstanceId()); List clusters = clustersFuture.get(1, TimeUnit.MINUTES); - // give opencensus some time to populate view data - for (int i = 0; i < 10; i++) { - if (StatsWrapper.getOperationLatencyViewTagValueStrings() - .contains(clusters.get(0).getZone())) { - break; - } - Thread.sleep(100); - } + Collection allMetricData = metricReader.collectAllMetrics(); + List metrics = + allMetricData.stream() + .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME)) + .collect(Collectors.toList()); + + assertThat(allMetricData) + .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS) + .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME); + assertThat(metrics).hasSize(1); + + MetricData metricData = metrics.get(0); + List pointData = new ArrayList<>(metricData.getData().getPoints()); + List clusterAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY)) + .collect(Collectors.toList()); + List zoneAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY)) + .collect(Collectors.toList()); - List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings(); - assertThat(tagValueStrings).contains(clusters.get(0).getZone()); - assertThat(tagValueStrings).contains(clusters.get(0).getId()); + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS) + .contains(clusters.get(0).getId()); + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS) + .contains(clusters.get(0).getZone()); + 
assertThat(clusterAttributes).contains(clusters.get(0).getId()); + assertThat(zoneAttributes).contains(clusters.get(0).getZone()); } @Test @@ -89,9 +146,7 @@ public void testFailure() throws Exception { String familyId = testEnvRule.env().getFamilyId(); ApiFuture future = - testEnvRule - .env() - .getDataClient() + client .mutateRowCallable() .futureCall( RowMutation.create("non-exist-table", rowKey).setCell(familyId, "q", "myVal")); @@ -106,16 +161,39 @@ public void testFailure() throws Exception { } } - // give opencensus some time to populate view data - for (int i = 0; i < 10; i++) { - if (StatsWrapper.getOperationLatencyViewTagValueStrings().contains("unspecified")) { + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = null; + for (MetricData md : allMetricData) { + if (md.getName() + .equals( + BuiltinMetricsConstants.METER_NAME + + BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME)) { + metricData = md; break; } - Thread.sleep(100); } - List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings(); - assertThat(tagValueStrings).contains("unspecified"); - assertThat(tagValueStrings).contains("global"); + assertThat(allMetricData) + .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS) + .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME); + assertThat(metricData).isNotNull(); + + List pointData = new ArrayList<>(metricData.getData().getPoints()); + + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS) + .contains("unspecified"); + assertThat(pointData).comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS).contains("global"); + List clusterAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY)) + .collect(Collectors.toList()); + List zoneAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY)) + .collect(Collectors.toList()); + + assertThat(clusterAttributes).contains("unspecified"); + assertThat(zoneAttributes).contains("global"); } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java index 79cbccb0ac..290fcc321f 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java @@ -885,6 +885,7 @@ public void enableRetryInfoFalseValueTest() throws IOException { "generateInitialChangeStreamPartitionsSettings", "readChangeStreamSettings", "pingAndWarmSettings", + "metricsProvider", }; @Test diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java new file mode 100644 index 0000000000..a0b9c058dc --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java @@ -0,0 +1,310 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APP_PROFILE_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_UID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY; +import static com.google.common.truth.Truth.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.api.Distribution; +import com.google.api.MonitoredResource; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.monitoring.v3.MetricServiceClient; +import com.google.cloud.monitoring.v3.stub.MetricServiceStub; +import com.google.common.collect.ImmutableList; +import com.google.monitoring.v3.CreateTimeSeriesRequest; +import com.google.monitoring.v3.TimeSeries; +import com.google.protobuf.Empty; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.common.InstrumentationScopeInfo; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramPointData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableLongPointData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableSumData; +import io.opentelemetry.sdk.resources.Resource; +import java.util.Arrays; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; + +public class BigtableCloudMonitoringExporterTest { + private static final String projectId = "fake-project"; + private static final String instanceId = "fake-instance"; + private static final String appProfileId = "default"; + private static final String tableId = "fake-table"; + private static final String zone = "us-east-1"; + private static final String cluster = "cluster-1"; + + private static final String clientName = "fake-client-name"; + private static final String taskId = "fake-task-id"; + + @Rule public final MockitoRule mockitoRule 
= MockitoJUnit.rule(); + + @Mock private MetricServiceStub mockMetricServiceStub; + private MetricServiceClient fakeMetricServiceClient; + private BigtableCloudMonitoringExporter exporter; + + private Attributes attributes; + private Resource resource; + private InstrumentationScopeInfo scope; + + @Before + public void setUp() { + fakeMetricServiceClient = new FakeMetricServiceClient(mockMetricServiceStub); + + exporter = + new BigtableCloudMonitoringExporter( + projectId, fakeMetricServiceClient, /* applicationResource= */ null, taskId); + + attributes = + Attributes.builder() + .put(BIGTABLE_PROJECT_ID_KEY, projectId) + .put(INSTANCE_ID_KEY, instanceId) + .put(TABLE_ID_KEY, tableId) + .put(CLUSTER_ID_KEY, cluster) + .put(ZONE_ID_KEY, zone) + .put(APP_PROFILE_KEY, appProfileId) + .build(); + + resource = Resource.create(Attributes.empty()); + + scope = InstrumentationScopeInfo.create(BuiltinMetricsConstants.METER_NAME); + } + + @After + public void tearDown() {} + + @Test + public void testExportingSumData() { + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = mock(UnaryCallable.class); + when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long fakeValue = 11L; + + long startEpoch = 10; + long endEpoch = 15; + LongPointData longPointData = + ImmutableLongPointData.create(startEpoch, endEpoch, attributes, fakeValue); + + MetricData longData = + ImmutableMetricData.createLongSum( + resource, + scope, + "bigtable.googleapis.com/internal/client/retry_count", + "description", + "1", + ImmutableSumData.create( + true, AggregationTemporality.CUMULATIVE, ImmutableList.of(longPointData))); + + exporter.export(Arrays.asList(longData)); + + CreateTimeSeriesRequest request = argumentCaptor.getValue(); + + assertThat(request.getTimeSeriesList()).hasSize(1); + + TimeSeries timeSeries = request.getTimeSeriesList().get(0); + + assertThat(timeSeries.getResource().getLabelsMap()) + .containsExactly( + BIGTABLE_PROJECT_ID_KEY.getKey(), projectId, + INSTANCE_ID_KEY.getKey(), instanceId, + TABLE_ID_KEY.getKey(), tableId, + CLUSTER_ID_KEY.getKey(), cluster, + ZONE_ID_KEY.getKey(), zone); + + assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2); + assertThat(timeSeries.getMetric().getLabelsMap()) + .containsAtLeast(APP_PROFILE_KEY.getKey(), appProfileId, CLIENT_UID_KEY.getKey(), taskId); + assertThat(timeSeries.getPoints(0).getValue().getInt64Value()).isEqualTo(fakeValue); + assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos()) + .isEqualTo(startEpoch); + assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch); + } + + @Test + public void testExportingHistogramData() { + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = mock(UnaryCallable.class); + when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long startEpoch = 10; + long endEpoch = 15; + HistogramPointData histogramPointData = + ImmutableHistogramPointData.create( + startEpoch, + endEpoch, + attributes, + 3d, + true, + 1d, // min + true, + 2d, // max + 
Arrays.asList(1.0), + Arrays.asList(1L, 2L)); + + MetricData histogramData = + ImmutableMetricData.createDoubleHistogram( + resource, + scope, + "bigtable.googleapis.com/internal/client/operation_latencies", + "description", + "ms", + ImmutableHistogramData.create( + AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData))); + + exporter.export(Arrays.asList(histogramData)); + + CreateTimeSeriesRequest request = argumentCaptor.getValue(); + + assertThat(request.getTimeSeriesList()).hasSize(1); + + TimeSeries timeSeries = request.getTimeSeriesList().get(0); + + assertThat(timeSeries.getResource().getLabelsMap()) + .containsExactly( + BIGTABLE_PROJECT_ID_KEY.getKey(), projectId, + INSTANCE_ID_KEY.getKey(), instanceId, + TABLE_ID_KEY.getKey(), tableId, + CLUSTER_ID_KEY.getKey(), cluster, + ZONE_ID_KEY.getKey(), zone); + + assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2); + assertThat(timeSeries.getMetric().getLabelsMap()) + .containsAtLeast(APP_PROFILE_KEY.getKey(), appProfileId, CLIENT_UID_KEY.getKey(), taskId); + Distribution distribution = timeSeries.getPoints(0).getValue().getDistributionValue(); + assertThat(distribution.getCount()).isEqualTo(3); + assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos()) + .isEqualTo(startEpoch); + assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch); + } + + @Test + public void testTimeSeriesForMetricWithGceOrGkeResource() { + String gceProjectId = "fake-gce-project"; + BigtableCloudMonitoringExporter exporter = + new BigtableCloudMonitoringExporter( + projectId, + fakeMetricServiceClient, + MonitoredResource.newBuilder() + .setType("gce-instance") + .putLabels("some-gce-key", "some-gce-value") + .putLabels("project_id", gceProjectId) + .build(), + taskId); + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = mock(UnaryCallable.class); + when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long startEpoch = 10; + long endEpoch = 15; + HistogramPointData histogramPointData = + ImmutableHistogramPointData.create( + startEpoch, + endEpoch, + Attributes.of( + BIGTABLE_PROJECT_ID_KEY, + projectId, + INSTANCE_ID_KEY, + instanceId, + APP_PROFILE_KEY, + appProfileId, + CLIENT_NAME_KEY, + clientName), + 3d, + true, + 1d, // min + true, + 2d, // max + Arrays.asList(1.0), + Arrays.asList(1L, 2L)); + + MetricData histogramData = + ImmutableMetricData.createDoubleHistogram( + resource, + scope, + "bigtable.googleapis.com/internal/client/per_connection_error_count", + "description", + "ms", + ImmutableHistogramData.create( + AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData))); + + exporter.export(Arrays.asList(histogramData)); + + CreateTimeSeriesRequest request = argumentCaptor.getValue(); + + assertThat(request.getName()).isEqualTo("projects/" + gceProjectId); + assertThat(request.getTimeSeriesList()).hasSize(1); + + com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0); + + assertThat(timeSeries.getResource().getLabelsMap()) + .containsExactly("some-gce-key", "some-gce-value", "project_id", gceProjectId); + + assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5); + assertThat(timeSeries.getMetric().getLabelsMap()) + .containsAtLeast( + 
BIGTABLE_PROJECT_ID_KEY.getKey(), + projectId, + INSTANCE_ID_KEY.getKey(), + instanceId, + APP_PROFILE_KEY.getKey(), + appProfileId, + CLIENT_NAME_KEY.getKey(), + clientName, + CLIENT_UID_KEY.getKey(), + taskId); + } + + private static class FakeMetricServiceClient extends MetricServiceClient { + + protected FakeMetricServiceClient(MetricServiceStub stub) { + super(stub); + } + } +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java index 5d16b623fd..a12dd3cfbd 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java @@ -45,7 +45,6 @@ import com.google.cloud.bigtable.data.v2.models.SampleRowKeysRequest; import com.google.cloud.bigtable.data.v2.models.TableId; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; -import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings; import com.google.common.collect.ImmutableMap; import io.grpc.ForwardingServerCall.SimpleForwardingServerCall; import io.grpc.Metadata; @@ -126,16 +125,21 @@ public void sendHeaders(Metadata headers) { .setInstanceId(INSTANCE_ID) .setAppProfileId(APP_PROFILE_ID) .build(); - EnhancedBigtableStubSettings stubSettings = - settings - .getStubSettings() + + ClientContext clientContext = + EnhancedBigtableStub.createClientContext(settings.getStubSettings()); + clientContext = + clientContext .toBuilder() .setTracerFactory( EnhancedBigtableStub.createBigtableTracerFactory( - settings.getStubSettings(), Tags.getTagger(), localStats.getStatsRecorder())) + settings.getStubSettings(), + Tags.getTagger(), + localStats.getStatsRecorder(), + null)) .build(); - attempts = stubSettings.readRowsSettings().getRetrySettings().getMaxAttempts(); - stub = new EnhancedBigtableStub(stubSettings, ClientContext.create(stubSettings)); + attempts = settings.getStubSettings().readRowsSettings().getRetrySettings().getMaxAttempts(); + stub = new EnhancedBigtableStub(settings.getStubSettings(), clientContext); // Create another server without injecting the server-timing header and another stub that // connects to it. 
@@ -147,18 +151,21 @@ public void sendHeaders(Metadata headers) { .setInstanceId(INSTANCE_ID) .setAppProfileId(APP_PROFILE_ID) .build(); - EnhancedBigtableStubSettings noHeaderStubSettings = - noHeaderSettings - .getStubSettings() + + ClientContext noHeaderClientContext = + EnhancedBigtableStub.createClientContext(noHeaderSettings.getStubSettings()); + noHeaderClientContext = + noHeaderClientContext .toBuilder() .setTracerFactory( EnhancedBigtableStub.createBigtableTracerFactory( noHeaderSettings.getStubSettings(), Tags.getTagger(), - localStats.getStatsRecorder())) + localStats.getStatsRecorder(), + null)) .build(); noHeaderStub = - new EnhancedBigtableStub(noHeaderStubSettings, ClientContext.create(noHeaderStubSettings)); + new EnhancedBigtableStub(noHeaderSettings.getStubSettings(), noHeaderClientContext); } @After diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java new file mode 100644 index 0000000000..09b7e1f663 --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java @@ -0,0 +1,112 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.core.InternalApi; +import com.google.protobuf.Timestamp; +import com.google.protobuf.util.Timestamps; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import java.util.Collection; +import java.util.List; +import java.util.stream.Collectors; +import org.junit.Assert; + +@InternalApi +public class BuiltinMetricsTestUtils { + + private BuiltinMetricsTestUtils() {} + + public static MetricData getMetricData(Collection allMetricData, String metricName) { + List metricDataList = + allMetricData.stream() + .filter(md -> md.getName().equals(BuiltinMetricsConstants.METER_NAME + metricName)) + .collect(Collectors.toList()); + if (metricDataList.size() == 0) { + allMetricData.stream().forEach(md -> System.out.println(md.getName())); + } + assertThat(metricDataList.size()).isEqualTo(1); + + return metricDataList.get(0); + } + + public static long getAggregatedValue(MetricData metricData, Attributes attributes) { + switch (metricData.getType()) { + case HISTOGRAM: + HistogramPointData hd = + metricData.getHistogramData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()) + .get(0); + return (long) hd.getSum() / hd.getCount(); + case LONG_SUM: + LongPointData ld = + metricData.getLongSumData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()) + .get(0); + return ld.getValue(); + default: + return 0; + } + } + + public static Timestamp getStartTimeSeconds(MetricData metricData, Attributes attributes) { + switch (metricData.getType()) { + case HISTOGRAM: + HistogramPointData hd = + metricData.getHistogramData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()) + .get(0); + return Timestamps.fromNanos(hd.getStartEpochNanos()); + case LONG_SUM: + LongPointData ld = + metricData.getLongSumData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()) + .get(0); + return Timestamps.fromNanos(ld.getStartEpochNanos()); + default: + return Timestamp.getDefaultInstance(); + } + } + + public static void verifyAttributes(MetricData metricData, Attributes attributes) { + switch (metricData.getType()) { + case HISTOGRAM: + List hd = + metricData.getHistogramData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()); + assertThat(hd).isNotEmpty(); + break; + case LONG_SUM: + List ld = + metricData.getLongSumData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()); + assertThat(ld).isNotEmpty(); + break; + default: + Assert.fail("Unexpected type"); + } + } +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java index 06b923cad3..2dd4bcabb3 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java @@ -15,14 
+15,24 @@ */ package com.google.cloud.bigtable.data.v2.stub.metrics; -import static com.google.api.gax.tracing.ApiTracerFactory.OperationType; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METHOD_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STATUS_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STREAMING_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getAggregatedValue; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getMetricData; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.verifyAttributes; import static com.google.common.truth.Truth.assertThat; -import static org.junit.Assert.assertThrows; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; import com.google.api.client.util.Lists; import com.google.api.core.ApiFunction; @@ -36,7 +46,6 @@ import com.google.api.gax.rpc.NotFoundException; import com.google.api.gax.rpc.ResponseObserver; import com.google.api.gax.rpc.StreamController; -import com.google.api.gax.tracing.SpanName; import com.google.bigtable.v2.BigtableGrpc; import com.google.bigtable.v2.MutateRowRequest; import com.google.bigtable.v2.MutateRowResponse; @@ -45,6 +54,7 @@ import com.google.bigtable.v2.ReadRowsRequest; import com.google.bigtable.v2.ReadRowsResponse; import com.google.bigtable.v2.ResponseParams; +import com.google.cloud.bigtable.Version; import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.FakeServiceBuilder; import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId; @@ -52,9 +62,9 @@ import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowMutation; import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; +import com.google.cloud.bigtable.data.v2.models.TableId; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings; -import com.google.cloud.bigtable.stats.StatsRecorderWrapper; 
import com.google.common.base.Stopwatch; import com.google.common.collect.Range; import com.google.protobuf.ByteString; @@ -77,11 +87,21 @@ import io.grpc.StatusRuntimeException; import io.grpc.stub.ServerCallStreamObserver; import io.grpc.stub.StreamObserver; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.View; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; import java.nio.charset.Charset; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -92,12 +112,8 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; import org.mockito.junit.MockitoJUnit; import org.mockito.junit.MockitoRule; -import org.mockito.stubbing.Answer; import org.threeten.bp.Duration; @RunWith(JUnit4.class) @@ -105,8 +121,8 @@ public class BuiltinMetricsTracerTest { private static final String PROJECT_ID = "fake-project"; private static final String INSTANCE_ID = "fake-instance"; private static final String APP_PROFILE_ID = "default"; - private static final String TABLE_ID = "fake-table"; - private static final String AUTHORIZED_VIEW_ID = "fake-authorized-view"; + private static final String TABLE = "fake-table"; + private static final String BAD_TABLE_ID = "non-exist-table"; private static final String ZONE = "us-west-1"; private static final String CLUSTER = "cluster-0"; @@ -114,6 +130,7 @@ public class BuiltinMetricsTracerTest { private static final long SERVER_LATENCY = 100; private static final long APPLICATION_LATENCY = 200; private static final long SLEEP_VARIABILITY = 15; + private static final String CLIENT_NAME = "java-bigtable/" + Version.VERSION; private static final long CHANNEL_BLOCKING_LATENCY = 75; @@ -124,18 +141,35 @@ public class BuiltinMetricsTracerTest { private EnhancedBigtableStub stub; - @Mock private BuiltinMetricsTracerFactory mockFactory; - @Mock private StatsRecorderWrapper statsRecorderWrapper; + private int batchElementCount = 2; - @Captor private ArgumentCaptor status; - @Captor private ArgumentCaptor tableId; - @Captor private ArgumentCaptor zone; - @Captor private ArgumentCaptor cluster; + private Attributes baseAttributes; - private int batchElementCount = 2; + private InMemoryMetricReader metricReader; @Before public void setUp() throws Exception { + metricReader = InMemoryMetricReader.create(); + + baseAttributes = + Attributes.builder() + .put(BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY, PROJECT_ID) + .put(BuiltinMetricsConstants.INSTANCE_ID_KEY, INSTANCE_ID) + .put(BuiltinMetricsConstants.APP_PROFILE_KEY, APP_PROFILE_ID) + .build(); + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + + for (Map.Entry entry : + BuiltinMetricsConstants.getAllViews().entrySet()) { + meterProvider.registerView(entry.getKey(), entry.getValue()); + } + + OpenTelemetrySdk otel = + 
OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + BuiltinMetricsTracerFactory factory = BuiltinMetricsTracerFactory.create(otel, baseAttributes); + // Add an interceptor to add server-timing in headers ServerInterceptor trailersInterceptor = new ServerInterceptor() { @@ -216,7 +250,8 @@ public void sendMessage(ReqT message) { .setMaxOutstandingRequestBytes(1001L) .build()) .build()); - stubSettingsBuilder.setTracerFactory(mockFactory); + + stubSettingsBuilder.setTracerFactory(factory); InstantiatingGrpcChannelProvider.Builder channelProvider = ((InstantiatingGrpcChannelProvider) stubSettingsBuilder.getTransportChannelProvider()) @@ -247,117 +282,117 @@ public void tearDown() { @Test public void testReadRowsOperationLatencies() { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class); - Stopwatch stopwatch = Stopwatch.createStarted(); - Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE_ID)).iterator()); + Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE)).iterator()); long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS); - verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture()); - // verify record operation is only called once - verify(statsRecorderWrapper) - .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(STREAMING_KEY, true) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); + + Collection allMetricData = metricReader.collectAllMetrics(); + + MetricData metricData = getMetricData(allMetricData, OPERATION_LATENCIES_NAME); - assertThat(operationLatency.getValue()).isIn(Range.closed(SERVER_LATENCY, elapsed)); - assertThat(status.getAllValues()).containsExactly("OK"); - assertThat(tableId.getAllValues()).containsExactly(TABLE_ID); - assertThat(zone.getAllValues()).containsExactly(ZONE); - assertThat(cluster.getAllValues()).containsExactly(CLUSTER); + long value = getAggregatedValue(metricData, expectedAttributes); + assertThat(value).isIn(Range.closed(SERVER_LATENCY, elapsed)); } @Test public void testReadRowsOperationLatenciesOnAuthorizedView() { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class); - + String authorizedViewId = "test-authorized-view-id"; Stopwatch stopwatch = Stopwatch.createStarted(); Lists.newArrayList( - stub.readRowsCallable() - .call(Query.create(AuthorizedViewId.of(TABLE_ID, AUTHORIZED_VIEW_ID))) - .iterator()); + stub.readRowsCallable().call(Query.create(AuthorizedViewId.of(TABLE, authorizedViewId)))); long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS); - verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture()); - // verify record operation is only called once - verify(statsRecorderWrapper) - .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Attributes expectedAttributes = 
baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(STREAMING_KEY, true) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); - assertThat(operationLatency.getValue()).isIn(Range.closed(SERVER_LATENCY, elapsed)); - assertThat(status.getAllValues()).containsExactly("OK"); - assertThat(tableId.getAllValues()).containsExactly(TABLE_ID); - assertThat(zone.getAllValues()).containsExactly(ZONE); - assertThat(cluster.getAllValues()).containsExactly(CLUSTER); + Collection allMetricData = metricReader.collectAllMetrics(); + + MetricData metricData = getMetricData(allMetricData, OPERATION_LATENCIES_NAME); + long value = getAggregatedValue(metricData, expectedAttributes); + assertThat(value).isIn(Range.closed(SERVER_LATENCY, elapsed)); } @Test public void testGfeMetrics() { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - ArgumentCaptor gfeLatency = ArgumentCaptor.forClass(Long.class); - ArgumentCaptor gfeMissingHeaders = ArgumentCaptor.forClass(Long.class); - - Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE_ID))); - - // Verify record attempt are called multiple times - verify(statsRecorderWrapper, times(fakeService.getAttemptCounter().get())) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); - - // The request was retried and gfe latency is only recorded in the retry attempt - verify(statsRecorderWrapper).putGfeLatencies(gfeLatency.capture()); - assertThat(gfeLatency.getValue()).isEqualTo(FAKE_SERVER_TIMING); - - // The first time the request was retried, it'll increment missing header counter - verify(statsRecorderWrapper, times(fakeService.getAttemptCounter().get())) - .putGfeMissingHeaders(gfeMissingHeaders.capture()); - assertThat(gfeMissingHeaders.getAllValues()).containsExactly(1L, 0L); - - assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "OK"); - assertThat(tableId.getAllValues()).containsExactly(TABLE_ID, TABLE_ID); - assertThat(zone.getAllValues()).containsExactly("global", ZONE); - assertThat(cluster.getAllValues()).containsExactly("unspecified", CLUSTER); + Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE))); + + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(METHOD_KEY, "Bigtable.ReadRows") + .build(); + + Collection allMetricData = metricReader.collectAllMetrics(); + + MetricData serverLatenciesMetricData = getMetricData(allMetricData, SERVER_LATENCIES_NAME); + + long serverLatencies = getAggregatedValue(serverLatenciesMetricData, expectedAttributes); + assertThat(serverLatencies).isEqualTo(FAKE_SERVER_TIMING); + + MetricData connectivityErrorCountMetricData = + getMetricData(allMetricData, CONNECTIVITY_ERROR_COUNT_NAME); + Attributes expected1 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "UNAVAILABLE") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, "global") + .put(CLUSTER_ID_KEY, "unspecified") + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); + Attributes expected2 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, 
ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); + + verifyAttributes(connectivityErrorCountMetricData, expected1); + verifyAttributes(connectivityErrorCountMetricData, expected2); + + assertThat(getAggregatedValue(connectivityErrorCountMetricData, expected1)).isEqualTo(1); + assertThat(getAggregatedValue(connectivityErrorCountMetricData, expected2)).isEqualTo(0); } @Test public void testReadRowsApplicationLatencyWithAutoFlowControl() throws Exception { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - - ArgumentCaptor applicationLatency = ArgumentCaptor.forClass(Long.class); - ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class); - final SettableApiFuture future = SettableApiFuture.create(); final AtomicInteger counter = new AtomicInteger(0); // For auto flow control, application latency is the time application spent in onResponse. stub.readRowsCallable() .call( - Query.create(TABLE_ID), + Query.create(TABLE), new ResponseObserver() { @Override public void onStart(StreamController streamController) {} @@ -383,37 +418,38 @@ public void onComplete() { }); future.get(); - verify(statsRecorderWrapper).putApplicationLatencies(applicationLatency.capture()); - verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture()); - verify(statsRecorderWrapper) - .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); - assertThat(counter.get()).isEqualTo(fakeService.getResponseCounter().get()); - // Thread.sleep might not sleep for the requested amount depending on the interrupt period - // defined by the OS. - // On linux this is ~1ms but on windows may be as high as 15-20ms. 
- assertThat(applicationLatency.getValue()) - .isAtLeast((APPLICATION_LATENCY - SLEEP_VARIABILITY) * counter.get()); - assertThat(applicationLatency.getValue()) - .isAtMost(operationLatency.getValue() - SERVER_LATENCY); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData applicationLatency = + getMetricData(allMetricData, APPLICATION_BLOCKING_LATENCIES_NAME); + + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(METHOD_KEY, "Bigtable.ReadRows") + .build(); + long value = getAggregatedValue(applicationLatency, expectedAttributes); + + assertThat(value).isAtLeast((APPLICATION_LATENCY - SLEEP_VARIABILITY) * counter.get()); + + MetricData operationLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME); + long operationLatencyValue = + getAggregatedValue( + operationLatency, + expectedAttributes.toBuilder().put(STATUS_KEY, "OK").put(STREAMING_KEY, true).build()); + assertThat(value).isAtMost(operationLatencyValue - SERVER_LATENCY); } @Test public void testReadRowsApplicationLatencyWithManualFlowControl() throws Exception { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - - ArgumentCaptor applicationLatency = ArgumentCaptor.forClass(Long.class); - ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class); int counter = 0; - Iterator rows = stub.readRowsCallable().call(Query.create(TABLE_ID)).iterator(); + Iterator rows = stub.readRowsCallable().call(Query.create(TABLE)).iterator(); while (rows.hasNext()) { counter++; @@ -421,148 +457,189 @@ public void testReadRowsApplicationLatencyWithManualFlowControl() throws Excepti rows.next(); } - verify(statsRecorderWrapper).putApplicationLatencies(applicationLatency.capture()); - verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture()); - verify(statsRecorderWrapper) - .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData applicationLatency = + getMetricData(allMetricData, APPLICATION_BLOCKING_LATENCIES_NAME); + + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(METHOD_KEY, "Bigtable.ReadRows") + .build(); - // For manual flow control, the last application latency shouldn't count, because at that point - // the server already sent back all the responses. + long value = getAggregatedValue(applicationLatency, expectedAttributes); + // For manual flow control, the last application latency shouldn't count, because at that + // point the server already sent back all the responses. 
assertThat(counter).isEqualTo(fakeService.getResponseCounter().get()); - assertThat(applicationLatency.getValue()) - .isAtLeast(APPLICATION_LATENCY * (counter - 1) - SERVER_LATENCY); - assertThat(applicationLatency.getValue()) - .isAtMost(operationLatency.getValue() - SERVER_LATENCY); + assertThat(value).isAtLeast(APPLICATION_LATENCY * (counter - 1) - SERVER_LATENCY); + + MetricData operationLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME); + long operationLatencyValue = + getAggregatedValue( + operationLatency, + expectedAttributes.toBuilder().put(STATUS_KEY, "OK").put(STREAMING_KEY, true).build()); + assertThat(value).isAtMost(operationLatencyValue - SERVER_LATENCY); } @Test - public void testRetryCount() { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "MutateRow"), - statsRecorderWrapper)); - - ArgumentCaptor retryCount = ArgumentCaptor.forClass(Integer.class); - + public void testRetryCount() throws InterruptedException { stub.mutateRowCallable() - .call(RowMutation.create(TABLE_ID, "random-row").setCell("cf", "q", "value")); - - // In TracedUnaryCallable, we create a future and add a TraceFinisher to the callback. Main - // thread is blocked on waiting for the future to be completed. When onComplete is called on - // the grpc thread, the future is completed, however we might not have enough time for - // TraceFinisher to run. Add a 1 second time out to wait for the callback. This shouldn't have - // any impact on production code. - verify(statsRecorderWrapper, timeout(1000)).putRetryCount(retryCount.capture()); + .call(RowMutation.create(TABLE, "random-row").setCell("cf", "q", "value")); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = getMetricData(allMetricData, RETRY_COUNT_NAME); + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(METHOD_KEY, "Bigtable.MutateRow") + .put(STATUS_KEY, "OK") + .build(); - assertThat(retryCount.getValue()).isEqualTo(fakeService.getAttemptCounter().get() - 1); + long value = getAggregatedValue(metricData, expectedAttributes); + assertThat(value).isEqualTo(fakeService.getAttemptCounter().get() - 1); } @Test public void testMutateRowAttemptsTagValues() { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.Unary, SpanName.of("Bigtable", "MutateRow"), statsRecorderWrapper)); - stub.mutateRowCallable() - .call(RowMutation.create(TABLE_ID, "random-row").setCell("cf", "q", "value")); - - // Set a timeout to reduce flakiness of this test. BasicRetryingFuture will set - // attempt succeeded and set the response which will call complete() in AbstractFuture which - // calls releaseWaiters(). onOperationComplete() is called in TracerFinisher which will be - // called after the mutateRow call is returned. So there's a race between when the call returns - // and when the record() is called in onOperationCompletion(). 
- verify(statsRecorderWrapper, timeout(50).times(fakeService.getAttemptCounter().get())) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); - assertThat(zone.getAllValues()).containsExactly("global", "global", ZONE); - assertThat(cluster.getAllValues()).containsExactly("unspecified", "unspecified", CLUSTER); - assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "UNAVAILABLE", "OK"); - assertThat(tableId.getAllValues()).containsExactly(TABLE_ID, TABLE_ID, TABLE_ID); + .call(RowMutation.create(TABLE, "random-row").setCell("cf", "q", "value")); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME); + + Attributes expected1 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "UNAVAILABLE") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, "global") + .put(CLUSTER_ID_KEY, "unspecified") + .put(METHOD_KEY, "Bigtable.MutateRow") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, false) + .build(); + + Attributes expected2 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.MutateRow") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, false) + .build(); + + verifyAttributes(metricData, expected1); + verifyAttributes(metricData, expected2); } @Test public void testMutateRowsPartialError() throws InterruptedException { + Batcher batcher = stub.newMutateRowsBatcher(TableId.of(TABLE), null); int numMutations = 6; - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper)); - - Batcher batcher = stub.newMutateRowsBatcher(TABLE_ID, null); for (int i = 0; i < numMutations; i++) { String key = i % 2 == 0 ? 
"key" : "fail-key"; batcher.add(RowMutationEntry.create(key).setCell("f", "q", "v")); } - assertThrows(BatchingException.class, () -> batcher.close()); - - int expectedNumRequests = numMutations / batchElementCount; - verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests)) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Assert.assertThrows(BatchingException.class, batcher::close); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME); + + Attributes expected = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.MutateRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, false) + .build(); - assertThat(zone.getAllValues()).containsExactly(ZONE, ZONE, ZONE); - assertThat(cluster.getAllValues()).containsExactly(CLUSTER, CLUSTER, CLUSTER); - assertThat(status.getAllValues()).containsExactly("OK", "OK", "OK"); + verifyAttributes(metricData, expected); } @Test public void testMutateRowsRpcError() { + Batcher batcher = + stub.newMutateRowsBatcher(TableId.of(BAD_TABLE_ID), null); int numMutations = 6; - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper)); - - Batcher batcher = stub.newMutateRowsBatcher(BAD_TABLE_ID, null); for (int i = 0; i < numMutations; i++) { - batcher.add(RowMutationEntry.create("key").setCell("f", "q", "v")); + String key = i % 2 == 0 ? "key" : "fail-key"; + batcher.add(RowMutationEntry.create(key).setCell("f", "q", "v")); } - assertThrows(BatchingException.class, () -> batcher.close()); - - int expectedNumRequests = numMutations / batchElementCount; - verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests)) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Assert.assertThrows(BatchingException.class, batcher::close); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME); + + Attributes expected = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "NOT_FOUND") + .put(TABLE_ID_KEY, BAD_TABLE_ID) + .put(ZONE_ID_KEY, "global") + .put(CLUSTER_ID_KEY, "unspecified") + .put(METHOD_KEY, "Bigtable.MutateRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, false) + .build(); - assertThat(zone.getAllValues()).containsExactly("global", "global", "global"); - assertThat(cluster.getAllValues()).containsExactly("unspecified", "unspecified", "unspecified"); - assertThat(status.getAllValues()).containsExactly("NOT_FOUND", "NOT_FOUND", "NOT_FOUND"); + verifyAttributes(metricData, expected); } @Test public void testReadRowsAttemptsTagValues() { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - Lists.newArrayList(stub.readRowsCallable().call(Query.create("fake-table")).iterator()); - // Set a timeout to reduce flakiness of this test. BasicRetryingFuture will set - // attempt succeeded and set the response which will call complete() in AbstractFuture which - // calls releaseWaiters(). onOperationComplete() is called in TracerFinisher which will be - // called after the mutateRow call is returned. 
So there's a race between when the call returns - // and when the record() is called in onOperationCompletion(). - verify(statsRecorderWrapper, timeout(50).times(fakeService.getAttemptCounter().get())) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); - assertThat(zone.getAllValues()).containsExactly("global", ZONE); - assertThat(cluster.getAllValues()).containsExactly("unspecified", CLUSTER); - assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "OK"); + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME); + + Attributes expected1 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "UNAVAILABLE") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, "global") + .put(CLUSTER_ID_KEY, "unspecified") + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, true) + .build(); + + Attributes expected2 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, true) + .build(); + + verifyAttributes(metricData, expected1); + verifyAttributes(metricData, expected2); } @Test public void testBatchBlockingLatencies() throws InterruptedException { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper)); - try (Batcher batcher = stub.newMutateRowsBatcher(TABLE_ID, null)) { + try (Batcher batcher = stub.newMutateRowsBatcher(TABLE, null)) { for (int i = 0; i < 6; i++) { batcher.add(RowMutationEntry.create("key").setCell("f", "q", "v")); } @@ -571,86 +648,100 @@ public void testBatchBlockingLatencies() throws InterruptedException { batcher.close(); int expectedNumRequests = 6 / batchElementCount; - ArgumentCaptor throttledTime = ArgumentCaptor.forClass(Long.class); - verify(statsRecorderWrapper, timeout(1000).times(expectedNumRequests)) - .putClientBlockingLatencies(throttledTime.capture()); - // After the first request is sent, batcher will block on add because of the server latency. - // Blocking latency should be around server latency. - assertThat(throttledTime.getAllValues().get(1)).isAtLeast(SERVER_LATENCY - 10); - assertThat(throttledTime.getAllValues().get(2)).isAtLeast(SERVER_LATENCY - 10); + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData applicationLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME); - verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests)) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.MutateRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); - assertThat(zone.getAllValues()).containsExactly(ZONE, ZONE, ZONE); - assertThat(cluster.getAllValues()).containsExactly(CLUSTER, CLUSTER, CLUSTER); + long value = getAggregatedValue(applicationLatency, expectedAttributes); + // After the first request is sent, batcher will block on add because of the server latency. + // Blocking latency should be around server latency. So each data point would be at least + // (SERVER_LATENCY - 10). 
+ long expected = (SERVER_LATENCY - 10) * (expectedNumRequests - 1) / expectedNumRequests; + assertThat(value).isAtLeast(expected); } } @Test - public void testQueuedOnChannelServerStreamLatencies() throws InterruptedException { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - - stub.readRowsCallable().all().call(Query.create(TABLE_ID)); - - ArgumentCaptor blockedTime = ArgumentCaptor.forClass(Long.class); - - verify(statsRecorderWrapper, timeout(1000).times(fakeService.attemptCounter.get())) - .putClientBlockingLatencies(blockedTime.capture()); + public void testQueuedOnChannelServerStreamLatencies() { + stub.readRowsCallable().all().call(Query.create(TABLE)); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData clientLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME); + + Attributes attributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(ZONE_ID_KEY, ZONE) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); - assertThat(blockedTime.getAllValues().get(1)).isAtLeast(CHANNEL_BLOCKING_LATENCY); + long value = getAggregatedValue(clientLatency, attributes); + assertThat(value).isAtLeast(CHANNEL_BLOCKING_LATENCY); } @Test - public void testQueuedOnChannelUnaryLatencies() throws InterruptedException { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.Unary, SpanName.of("Bigtable", "MutateRow"), statsRecorderWrapper)); - stub.mutateRowCallable().call(RowMutation.create(TABLE_ID, "a-key").setCell("f", "q", "v")); + public void testQueuedOnChannelUnaryLatencies() { - ArgumentCaptor blockedTime = ArgumentCaptor.forClass(Long.class); + stub.mutateRowCallable().call(RowMutation.create(TABLE, "a-key").setCell("f", "q", "v")); - verify(statsRecorderWrapper, timeout(1000).times(fakeService.attemptCounter.get())) - .putClientBlockingLatencies(blockedTime.capture()); + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData clientLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME); - assertThat(blockedTime.getAllValues().get(1)).isAtLeast(CHANNEL_BLOCKING_LATENCY); - assertThat(blockedTime.getAllValues().get(2)).isAtLeast(CHANNEL_BLOCKING_LATENCY); + Attributes attributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(ZONE_ID_KEY, ZONE) + .put(METHOD_KEY, "Bigtable.MutateRow") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); + + long expected = CHANNEL_BLOCKING_LATENCY * 2 / 3; + long actual = getAggregatedValue(clientLatency, attributes); + assertThat(actual).isAtLeast(expected); } @Test public void testPermanentFailure() { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - try { Lists.newArrayList(stub.readRowsCallable().call(Query.create(BAD_TABLE_ID)).iterator()); Assert.fail("Request should throw not found error"); } catch (NotFoundException e) { } - ArgumentCaptor attemptLatency = ArgumentCaptor.forClass(Long.class); - ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class); + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData attemptLatency = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME); + 
+ Attributes expected = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "NOT_FOUND") + .put(TABLE_ID_KEY, BAD_TABLE_ID) + .put(CLUSTER_ID_KEY, "unspecified") + .put(ZONE_ID_KEY, "global") + .put(STREAMING_KEY, true) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); - verify(statsRecorderWrapper, timeout(50)).putAttemptLatencies(attemptLatency.capture()); - verify(statsRecorderWrapper, timeout(50)).putOperationLatencies(operationLatency.capture()); - verify(statsRecorderWrapper, timeout(50)) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + verifyAttributes(attemptLatency, expected); - assertThat(status.getValue()).isEqualTo("NOT_FOUND"); - assertThat(tableId.getValue()).isEqualTo(BAD_TABLE_ID); - assertThat(cluster.getValue()).isEqualTo("unspecified"); - assertThat(zone.getValue()).isEqualTo("global"); + MetricData opLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME); + verifyAttributes(opLatency, expected); } private static class FakeService extends BigtableGrpc.BigtableImplBase { diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java index a6670182b8..4ab19a5337 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java @@ -23,17 +23,29 @@ import com.google.api.gax.grpc.ChannelPoolSettings; import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; import com.google.bigtable.v2.*; +import com.google.cloud.bigtable.Version; import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.FakeServiceBuilder; import com.google.cloud.bigtable.data.v2.models.*; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings; -import com.google.cloud.bigtable.stats.StatsRecorderWrapperForConnection; import io.grpc.Server; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.grpc.stub.StreamObserver; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.View; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; +import java.util.ArrayList; +import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.concurrent.ScheduledExecutorService; import org.junit.After; import org.junit.Before; @@ -51,25 +63,50 @@ public class ErrorCountPerConnectionTest { private final FakeService fakeService = new FakeService(); private EnhancedBigtableStubSettings.Builder builder; private ArgumentCaptor runnableCaptor; - private StatsRecorderWrapperForConnection statsRecorderWrapperForConnection; + + private InMemoryMetricReader metricReader; + + private Attributes attributes; @Before public void setup() throws Exception { server = FakeServiceBuilder.create(fakeService).start(); 
ScheduledExecutorService executors = Mockito.mock(ScheduledExecutorService.class); + + attributes = + Attributes.builder() + .put(BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY, "fake-project") + .put(BuiltinMetricsConstants.INSTANCE_ID_KEY, "fake-instance") + .put(BuiltinMetricsConstants.APP_PROFILE_KEY, "") + .put(BuiltinMetricsConstants.CLIENT_NAME_KEY, "bigtable-java/" + Version.VERSION) + .build(); + + metricReader = InMemoryMetricReader.create(); + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + + for (Map.Entry entry : + BuiltinMetricsConstants.getAllViews().entrySet()) { + meterProvider.registerView(entry.getKey(), entry.getValue()); + } + + OpenTelemetrySdk otel = + OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + builder = BigtableDataSettings.newBuilderForEmulator(server.getPort()) .stubSettings() .setBackgroundExecutorProvider(FixedExecutorProvider.create(executors)) .setProjectId("fake-project") - .setInstanceId("fake-instance"); + .setInstanceId("fake-instance") + .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(otel)); + runnableCaptor = ArgumentCaptor.forClass(Runnable.class); Mockito.when( executors.scheduleAtFixedRate(runnableCaptor.capture(), anyLong(), anyLong(), any())) .thenReturn(null); - - statsRecorderWrapperForConnection = Mockito.mock(StatsRecorderWrapperForConnection.class); } @After @@ -98,14 +135,21 @@ public void readWithOneChannel() throws Exception { // noop } } - ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class); - Mockito.doNothing() - .when(statsRecorderWrapperForConnection) - .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture()); + runInterceptorTasksAndAssertCount(); - List allErrorCounts = errorCountCaptor.getAllValues(); - assertThat(allErrorCounts.size()).isEqualTo(1); - assertThat(allErrorCounts.get(0)).isEqualTo(errorCount); + + Collection allMetrics = metricReader.collectAllMetrics(); + MetricData metricData = + BuiltinMetricsTestUtils.getMetricData( + allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME); + + // Make sure the correct bucket is updated with the correct number of data points + ArrayList histogramPointData = + new ArrayList<>(metricData.getHistogramData().getPoints()); + assertThat(histogramPointData.size()).isEqualTo(1); + HistogramPointData point = histogramPointData.get(0); + int index = findDataPointIndex(point.getBoundaries(), errorCount); + assertThat(point.getCounts().get(index)).isEqualTo(1); } @Test @@ -131,28 +175,35 @@ public void readWithTwoChannels() throws Exception { // noop } } - ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class); - Mockito.doNothing() - .when(statsRecorderWrapperForConnection) - .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture()); runInterceptorTasksAndAssertCount(); - List allErrorCounts = errorCountCaptor.getAllValues(); - assertThat(allErrorCounts.size()).isEqualTo(2); - // Requests get assigned to channels using a Round Robin algorithm, so half to each. - assertThat(allErrorCounts).containsExactly(totalErrorCount / 2, totalErrorCount / 2); + long errorCountPerChannel = totalErrorCount / 2; + + Collection allMetrics = metricReader.collectAllMetrics(); + MetricData metricData = + BuiltinMetricsTestUtils.getMetricData( + allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME); + + // The 2 channels should get equal amount of errors, so the totalErrorCount / 2 bucket is + // updated twice. 
+    ArrayList<HistogramPointData> histogramPointData =
+        new ArrayList<>(metricData.getHistogramData().getPoints());
+    assertThat(histogramPointData.size()).isEqualTo(1);
+    HistogramPointData point = histogramPointData.get(0);
+    int index = findDataPointIndex(point.getBoundaries(), errorCountPerChannel);
+    assertThat(point.getCounts().get(index)).isEqualTo(2);
   }

   @Test
   public void readOverTwoPeriods() throws Exception {
     EnhancedBigtableStub stub = EnhancedBigtableStub.create(builder.build());
-    long errorCount = 0;
+    long errorCount1 = 0;
     for (int i = 0; i < 20; i++) {
       Query query;
       if (i % 3 == 0) {
         query = Query.create(ERROR_TABLE_NAME);
-        errorCount += 1;
+        errorCount1 += 1;
       } else {
         query = Query.create(SUCCESS_TABLE_NAME);
       }
@@ -162,16 +213,9 @@ public void readOverTwoPeriods() throws Exception {
         // noop
       }
     }
-    ArgumentCaptor<Long> errorCountCaptor = ArgumentCaptor.forClass(long.class);
-    Mockito.doNothing()
-        .when(statsRecorderWrapperForConnection)
-        .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
-    runInterceptorTasksAndAssertCount();
-    List<Long> allErrorCounts = errorCountCaptor.getAllValues();
-    assertThat(allErrorCounts.size()).isEqualTo(1);
-    assertThat(allErrorCounts.get(0)).isEqualTo(errorCount);
-    errorCount = 0;
+    runInterceptorTasksAndAssertCount();
+    long errorCount2 = 0;
     for (int i = 0; i < 20; i++) {
       Query query;
@@ -179,7 +223,7 @@ public void readOverTwoPeriods() throws Exception {
         query = Query.create(SUCCESS_TABLE_NAME);
       } else {
         query = Query.create(ERROR_TABLE_NAME);
-        errorCount += 1;
+        errorCount2 += 1;
       }
       try {
         stub.readRowsCallable().call(query).iterator().hasNext();
@@ -187,27 +231,22 @@ public void readOverTwoPeriods() throws Exception {
         // noop
       }
     }
-    errorCountCaptor = ArgumentCaptor.forClass(long.class);
-    Mockito.doNothing()
-        .when(statsRecorderWrapperForConnection)
-        .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
+
     runInterceptorTasksAndAssertCount();
-    allErrorCounts = errorCountCaptor.getAllValues();
-    assertThat(allErrorCounts.size()).isEqualTo(1);
-    assertThat(allErrorCounts.get(0)).isEqualTo(errorCount);
-  }

-  @Test
-  public void ignoreInactiveConnection() throws Exception {
-    EnhancedBigtableStub stub = EnhancedBigtableStub.create(builder.build());
+    Collection<MetricData> allMetrics = metricReader.collectAllMetrics();
+    MetricData metricData =
+        BuiltinMetricsTestUtils.getMetricData(
+            allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME);

-    ArgumentCaptor<Long> errorCountCaptor = ArgumentCaptor.forClass(long.class);
-    Mockito.doNothing()
-        .when(statsRecorderWrapperForConnection)
-        .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
-    runInterceptorTasksAndAssertCount();
-    List<Long> allErrorCounts = errorCountCaptor.getAllValues();
-    assertThat(allErrorCounts).isEmpty();
+    ArrayList<HistogramPointData> histogramPointData =
+        new ArrayList<>(metricData.getHistogramData().getPoints());
+    assertThat(histogramPointData.size()).isEqualTo(1);
+    HistogramPointData point = histogramPointData.get(0);
+    int index1 = findDataPointIndex(point.getBoundaries(), errorCount1);
+    int index2 = findDataPointIndex(point.getBoundaries(), errorCount2);
+    assertThat(point.getCounts().get(index1)).isEqualTo(1);
+    assertThat(point.getCounts().get(index2)).isEqualTo(1);
   }

   @Test
@@ -221,22 +260,19 @@ public void noFailedRequests() throws Exception {
         // noop
       }
     }
-    ArgumentCaptor<Long> errorCountCaptor = ArgumentCaptor.forClass(long.class);
-    Mockito.doNothing()
-        .when(statsRecorderWrapperForConnection)
-        .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
     runInterceptorTasksAndAssertCount();
-    List<Long> allErrorCounts = errorCountCaptor.getAllValues();
-    assertThat(allErrorCounts.size()).isEqualTo(1);
-    assertThat(allErrorCounts.get(0)).isEqualTo(0);
+    Collection<MetricData> allMetrics = metricReader.collectAllMetrics();
+    MetricData metricData =
+        BuiltinMetricsTestUtils.getMetricData(
+            allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME);
+    long value = BuiltinMetricsTestUtils.getAggregatedValue(metricData, attributes);
+    assertThat(value).isEqualTo(0);
   }

   private void runInterceptorTasksAndAssertCount() {
     int actualNumOfTasks = 0;
     for (Runnable runnable : runnableCaptor.getAllValues()) {
       if (runnable instanceof ErrorCountPerConnectionMetricTracker) {
-        ((ErrorCountPerConnectionMetricTracker) runnable)
-            .setStatsRecorderWrapperForConnection(statsRecorderWrapperForConnection);
         runnable.run();
         actualNumOfTasks++;
       }
@@ -244,6 +280,16 @@ private void runInterceptorTasksAndAssertCount() {
     assertThat(actualNumOfTasks).isEqualTo(1);
   }

+  private int findDataPointIndex(List<Double> boundaries, long dataPoint) {
+    int index = 0;
+    for (; index < boundaries.size(); index++) {
+      if (boundaries.get(index) >= dataPoint) {
+        break;
+      }
+    }
+    return index;
+  }
+
   static class FakeService extends BigtableGrpc.BigtableImplBase {
     @Override
     public void readRows(
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java
index 15bd9171f0..d72eac4056 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java
@@ -39,7 +39,6 @@
 import com.google.cloud.bigtable.data.v2.models.Row;
 import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;
 import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
-import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
 import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor;
 import com.google.common.base.Stopwatch;
 import com.google.common.collect.ImmutableMap;
@@ -120,15 +119,20 @@ public void setUp() throws Exception {
             .setInstanceId(INSTANCE_ID)
             .setAppProfileId(APP_PROFILE_ID)
             .build();
-    EnhancedBigtableStubSettings stubSettings =
-        settings
-            .getStubSettings()
+
+    ClientContext clientContext =
+        EnhancedBigtableStub.createClientContext(settings.getStubSettings());
+    clientContext =
+        clientContext
            .toBuilder()
            .setTracerFactory(
                EnhancedBigtableStub.createBigtableTracerFactory(
-                    settings.getStubSettings(), Tags.getTagger(), localStats.getStatsRecorder()))
+                    settings.getStubSettings(),
+                    Tags.getTagger(),
+                    localStats.getStatsRecorder(),
+                    null))
            .build();
-    stub = new EnhancedBigtableStub(stubSettings, ClientContext.create(stubSettings));
+    stub = new EnhancedBigtableStub(settings.getStubSettings(), clientContext);
   }

   @After
diff --git a/pom.xml b/pom.xml
index f193e7e852..c1c9404fa0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -347,22 +347,6 @@
-    <profile>
-      <id>with-shaded</id>
-      <activation>
-        <property>
-          <name>!skip-shaded</name>
-        </property>
-      </activation>
-      <modules>
-        <module>google-cloud-bigtable-stats</module>
-      </modules>
-    </profile>
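The test setup in this patch doubles as a usage sketch for the new metrics wiring: an application that wants Bigtable's built-in client metrics routed through its own OpenTelemetry SDK builds an SdkMeterProvider, registers the library's recommended views, and hands the resulting OpenTelemetry instance to the client via setMetricsProvider. The sketch below is distilled from the ErrorCountPerConnectionTest setup above, not an authoritative snippet from this patch: the project/instance IDs are placeholders, InMemoryMetricReader stands in for whatever metric reader/exporter the application really uses, and it assumes BuiltinMetricsConstants.getAllViews() and CustomOpenTelemetryMetricsProvider are reachable from application code the way the test (which lives in the same package) reaches them.

```java
import com.google.cloud.bigtable.data.v2.BigtableDataClient;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
import io.opentelemetry.sdk.OpenTelemetrySdk;
import io.opentelemetry.sdk.metrics.InstrumentSelector;
import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
import io.opentelemetry.sdk.metrics.SdkMeterProvider;
import io.opentelemetry.sdk.metrics.View;
import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
import java.util.Map;

public class CustomOtelMetricsSketch {
  public static void main(String[] args) throws Exception {
    // Application-owned OTel SDK; swap InMemoryMetricReader for a real reader/exporter.
    InMemoryMetricReader reader = InMemoryMetricReader.create();
    SdkMeterProviderBuilder meterProvider =
        SdkMeterProvider.builder().registerMetricReader(reader);

    // Register the library's recommended views, exactly as the test setup above does.
    for (Map.Entry<InstrumentSelector, View> entry :
        BuiltinMetricsConstants.getAllViews().entrySet()) {
      meterProvider.registerView(entry.getKey(), entry.getValue());
    }

    OpenTelemetrySdk otel =
        OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();

    // Point the Bigtable client at the custom SDK instead of the default exporter.
    BigtableDataSettings.Builder settings =
        BigtableDataSettings.newBuilder()
            .setProjectId("my-project") // placeholder
            .setInstanceId("my-instance"); // placeholder
    settings.stubSettings().setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(otel));

    try (BigtableDataClient client = BigtableDataClient.create(settings.build())) {
      // Use the client; built-in client metrics now flow through the custom OTel SDK.
    }
  }
}
```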