com.google.truth
truth
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java
index c35500a189..45ec5af814 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java
@@ -19,6 +19,7 @@
import com.google.api.gax.core.BackgroundResource;
import com.google.api.gax.rpc.ClientContext;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
+import io.opentelemetry.api.OpenTelemetry;
import java.io.IOException;
import javax.annotation.Nonnull;
@@ -64,6 +65,7 @@
public final class BigtableDataClientFactory implements AutoCloseable {
private final BigtableDataSettings defaultSettings;
private final ClientContext sharedClientContext;
+ private final OpenTelemetry openTelemetry;
/**
* Create an instance of this factory.
@@ -75,13 +77,21 @@ public static BigtableDataClientFactory create(BigtableDataSettings defaultSetti
throws IOException {
ClientContext sharedClientContext =
EnhancedBigtableStub.createClientContext(defaultSettings.getStubSettings());
- return new BigtableDataClientFactory(sharedClientContext, defaultSettings);
+ OpenTelemetry openTelemetry =
+ EnhancedBigtableStub.getOpenTelemetry(
+ defaultSettings.getProjectId(),
+ defaultSettings.getMetricsProvider(),
+ sharedClientContext.getCredentials());
+ return new BigtableDataClientFactory(sharedClientContext, defaultSettings, openTelemetry);
}
private BigtableDataClientFactory(
- ClientContext sharedClientContext, BigtableDataSettings defaultSettings) {
+ ClientContext sharedClientContext,
+ BigtableDataSettings defaultSettings,
+ OpenTelemetry openTelemetry) {
this.sharedClientContext = sharedClientContext;
this.defaultSettings = defaultSettings;
+ this.openTelemetry = openTelemetry;
}
/**
@@ -112,7 +122,7 @@ public BigtableDataClient createDefault() {
.toBuilder()
.setTracerFactory(
EnhancedBigtableStub.createBigtableTracerFactory(
- defaultSettings.getStubSettings()))
+ defaultSettings.getStubSettings(), openTelemetry))
.build();
return BigtableDataClient.createWithClientContext(defaultSettings, clientContext);
@@ -140,7 +150,8 @@ public BigtableDataClient createForAppProfile(@Nonnull String appProfileId) thro
sharedClientContext
.toBuilder()
.setTracerFactory(
- EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings()))
+ EnhancedBigtableStub.createBigtableTracerFactory(
+ settings.getStubSettings(), openTelemetry))
.build();
return BigtableDataClient.createWithClientContext(settings, clientContext);
}
@@ -168,7 +179,8 @@ public BigtableDataClient createForInstance(@Nonnull String projectId, @Nonnull
sharedClientContext
.toBuilder()
.setTracerFactory(
- EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings()))
+ EnhancedBigtableStub.createBigtableTracerFactory(
+ settings.getStubSettings(), openTelemetry))
.build();
return BigtableDataClient.createWithClientContext(settings, clientContext);
@@ -197,7 +209,8 @@ public BigtableDataClient createForInstance(
sharedClientContext
.toBuilder()
.setTracerFactory(
- EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings()))
+ EnhancedBigtableStub.createBigtableTracerFactory(
+ settings.getStubSettings(), openTelemetry))
.build();
return BigtableDataClient.createWithClientContext(settings, clientContext);
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java
index 701a5e8e49..928159aa6d 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java
@@ -25,19 +25,16 @@
import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider;
import com.google.api.gax.rpc.UnaryCallSettings;
import com.google.auth.Credentials;
-import com.google.auth.oauth2.GoogleCredentials;
import com.google.cloud.bigtable.data.v2.models.Query;
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.stub.BigtableBatchingCallSettings;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
-import com.google.cloud.bigtable.stats.BigtableStackdriverStatsExporter;
-import com.google.cloud.bigtable.stats.BuiltinViews;
+import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider;
import com.google.common.base.MoreObjects;
import com.google.common.base.Strings;
import io.grpc.ManagedChannelBuilder;
import java.io.IOException;
import java.util.List;
-import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Logger;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
@@ -77,7 +74,10 @@ public final class BigtableDataSettings {
private static final Logger LOGGER = Logger.getLogger(BigtableDataSettings.class.getName());
private static final String BIGTABLE_EMULATOR_HOST_ENV_VAR = "BIGTABLE_EMULATOR_HOST";
- private static final AtomicBoolean BUILTIN_METRICS_REGISTERED = new AtomicBoolean(false);
+ // This is the legacy credential override used in the deprecated enableBuiltinMetrics method to
+ // override the default credentials set on the Bigtable client. Keeping it for backward
+ // compatibility.
+ @Deprecated @Nullable private static Credentials legacyMetricCredentialOverride;
private final EnhancedBigtableStubSettings stubSettings;
@@ -197,23 +197,34 @@ public static void enableGfeOpenCensusStats() {
com.google.cloud.bigtable.data.v2.stub.metrics.RpcViews.registerBigtableClientGfeViews();
}
- /** Register built in metrics. */
- public static void enableBuiltinMetrics() throws IOException {
- if (BUILTIN_METRICS_REGISTERED.compareAndSet(false, true)) {
- BuiltinViews.registerBigtableBuiltinViews();
- BigtableStackdriverStatsExporter.register(GoogleCredentials.getApplicationDefault());
- }
- }
+ /**
+ * Register built in metrics.
+ *
+ * @deprecated This is a no-op that doesn't do anything. Builtin metrics are enabled by default
+ * now. Please refer to {@link
+ * BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)} on how to enable or
+ * disable built-in metrics.
+ */
+ @Deprecated
+ public static void enableBuiltinMetrics() throws IOException {}
/**
* Register built in metrics with credentials. The credentials need to have metric write access
* for all the projects you're publishing to.
+ *
+ * @deprecated This is a no-op that doesn't do anything. Builtin metrics are enabled by default
+ *     now. Please refer to {@link BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)}
+ * on how to enable or disable built-in metrics.
*/
+ @Deprecated
public static void enableBuiltinMetrics(Credentials credentials) throws IOException {
- if (BUILTIN_METRICS_REGISTERED.compareAndSet(false, true)) {
- BuiltinViews.registerBigtableBuiltinViews();
- BigtableStackdriverStatsExporter.register(credentials);
- }
+ BigtableDataSettings.legacyMetricCredentialOverride = credentials;
+ }
+
+  /** Gets the metrics credentials if it was set by {@link #enableBuiltinMetrics(Credentials)}. */
+ @InternalApi
+ public static Credentials getMetricsCredentials() {
+ return legacyMetricCredentialOverride;
}
/** Returns the target project id. */
@@ -278,6 +289,11 @@ public boolean isBulkMutationFlowControlEnabled() {
return stubSettings.bulkMutateRowsSettings().isServerInitiatedFlowControlEnabled();
}
+  /** Gets the {@link MetricsProvider}. */
+ public MetricsProvider getMetricsProvider() {
+ return stubSettings.getMetricsProvider();
+ }
+
/** Returns the underlying RPC settings. */
public EnhancedBigtableStubSettings getStubSettings() {
return stubSettings;
@@ -527,6 +543,30 @@ public boolean isBulkMutationFlowControlEnabled() {
return stubSettings.bulkMutateRowsSettings().isServerInitiatedFlowControlEnabled();
}
+ /**
+ * Sets the {@link MetricsProvider}.
+ *
+ * <p>By default, this is set to {@link
+ * com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider#INSTANCE} which will
+ * collect and export client side metrics.
+ *
+ * <p>To disable client side metrics, set it to {@link
+ * com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider#INSTANCE}.
+ *
+ * <p>To use a custom OpenTelemetry instance, refer to {@link
+ * com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider} on how to
+ * set it up.
+ */
+ public Builder setMetricsProvider(MetricsProvider metricsProvider) {
+ stubSettings.setMetricsProvider(metricsProvider);
+ return this;
+ }
+
+ /** Gets the {@link MetricsProvider}. */
+ public MetricsProvider getMetricsProvider() {
+ return stubSettings.getMetricsProvider();
+ }
+
/**
* Returns the underlying settings for making RPC calls. The settings should be changed with
* care.
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java
index ec15c4131a..f0aa852338 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java
@@ -15,6 +15,11 @@
*/
package com.google.cloud.bigtable.data.v2.stub;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APP_PROFILE_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY;
+
import com.google.api.core.ApiFunction;
import com.google.api.core.BetaApi;
import com.google.api.core.InternalApi;
@@ -68,6 +73,7 @@
import com.google.bigtable.v2.RowRange;
import com.google.bigtable.v2.SampleRowKeysResponse;
import com.google.cloud.bigtable.Version;
+import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.internal.JwtCredentialsWithAudience;
import com.google.cloud.bigtable.data.v2.internal.NameUtil;
import com.google.cloud.bigtable.data.v2.internal.RequestContext;
@@ -97,8 +103,12 @@
import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracerUnaryCallable;
import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTracerFactory;
import com.google.cloud.bigtable.data.v2.stub.metrics.CompositeTracerFactory;
+import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
+import com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider;
import com.google.cloud.bigtable.data.v2.stub.metrics.ErrorCountPerConnectionMetricTracker;
+import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider;
import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsTracerFactory;
+import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider;
import com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants;
import com.google.cloud.bigtable.data.v2.stub.metrics.StatsHeadersServerStreamingCallable;
import com.google.cloud.bigtable.data.v2.stub.metrics.StatsHeadersUnaryCallable;
@@ -130,6 +140,8 @@
import io.opencensus.tags.TagValue;
import io.opencensus.tags.Tagger;
import io.opencensus.tags.Tags;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.common.Attributes;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
@@ -185,10 +197,17 @@ public class EnhancedBigtableStub implements AutoCloseable {
public static EnhancedBigtableStub create(EnhancedBigtableStubSettings settings)
throws IOException {
- settings = settings.toBuilder().setTracerFactory(createBigtableTracerFactory(settings)).build();
ClientContext clientContext = createClientContext(settings);
+ OpenTelemetry openTelemetry =
+ getOpenTelemetry(
+ settings.getProjectId(), settings.getMetricsProvider(), clientContext.getCredentials());
+ ClientContext contextWithTracer =
+ clientContext
+ .toBuilder()
+ .setTracerFactory(createBigtableTracerFactory(settings, openTelemetry))
+ .build();
- return new EnhancedBigtableStub(settings, clientContext);
+ return new EnhancedBigtableStub(settings, contextWithTracer);
}
public static EnhancedBigtableStub createWithClientContext(
@@ -207,15 +226,26 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set
// workaround JWT audience issues
patchCredentials(builder);
+ // Fix the credentials so that they can be shared
+ Credentials credentials = null;
+ if (builder.getCredentialsProvider() != null) {
+ credentials = builder.getCredentialsProvider().getCredentials();
+ }
+ builder.setCredentialsProvider(FixedCredentialsProvider.create(credentials));
+
InstantiatingGrpcChannelProvider.Builder transportProvider =
builder.getTransportChannelProvider() instanceof InstantiatingGrpcChannelProvider
? ((InstantiatingGrpcChannelProvider) builder.getTransportChannelProvider()).toBuilder()
: null;
+ OpenTelemetry openTelemetry =
+ getOpenTelemetry(settings.getProjectId(), settings.getMetricsProvider(), credentials);
ErrorCountPerConnectionMetricTracker errorCountPerConnectionMetricTracker;
- if (transportProvider != null) {
+ // Skip setting up ErrorCountPerConnectionMetricTracker if openTelemetry is null
+ if (openTelemetry != null && transportProvider != null) {
errorCountPerConnectionMetricTracker =
- new ErrorCountPerConnectionMetricTracker(createBuiltinAttributes(builder));
+ new ErrorCountPerConnectionMetricTracker(
+ openTelemetry, createBuiltinAttributes(settings));
ApiFunction oldChannelConfigurator =
transportProvider.getChannelConfigurator();
transportProvider.setChannelConfigurator(
@@ -237,12 +267,6 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set
// Inject channel priming
if (settings.isRefreshingChannel()) {
- // Fix the credentials so that they can be shared
- Credentials credentials = null;
- if (builder.getCredentialsProvider() != null) {
- credentials = builder.getCredentialsProvider().getCredentials();
- }
- builder.setCredentialsProvider(FixedCredentialsProvider.create(credentials));
if (transportProvider != null) {
transportProvider.setChannelPrimer(
@@ -267,13 +291,18 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set
}
public static ApiTracerFactory createBigtableTracerFactory(
- EnhancedBigtableStubSettings settings) {
- return createBigtableTracerFactory(settings, Tags.getTagger(), Stats.getStatsRecorder());
+ EnhancedBigtableStubSettings settings, OpenTelemetry openTelemetry) throws IOException {
+ return createBigtableTracerFactory(
+ settings, Tags.getTagger(), Stats.getStatsRecorder(), openTelemetry);
}
@VisibleForTesting
public static ApiTracerFactory createBigtableTracerFactory(
- EnhancedBigtableStubSettings settings, Tagger tagger, StatsRecorder stats) {
+ EnhancedBigtableStubSettings settings,
+ Tagger tagger,
+ StatsRecorder stats,
+ OpenTelemetry openTelemetry)
+ throws IOException {
String projectId = settings.getProjectId();
String instanceId = settings.getInstanceId();
String appProfileId = settings.getAppProfileId();
@@ -284,10 +313,10 @@ public static ApiTracerFactory createBigtableTracerFactory(
.put(RpcMeasureConstants.BIGTABLE_INSTANCE_ID, TagValue.create(instanceId))
.put(RpcMeasureConstants.BIGTABLE_APP_PROFILE_ID, TagValue.create(appProfileId))
.build();
- ImmutableMap builtinAttributes = createBuiltinAttributes(settings.toBuilder());
- return new CompositeTracerFactory(
- ImmutableList.of(
+ ImmutableList.Builder tracerFactories = ImmutableList.builder();
+ tracerFactories
+ .add(
// Add OpenCensus Tracing
new OpencensusTracerFactory(
ImmutableMap.builder()
@@ -299,22 +328,52 @@ public static ApiTracerFactory createBigtableTracerFactory(
.put("gax", GaxGrpcProperties.getGaxGrpcVersion())
.put("grpc", GaxGrpcProperties.getGrpcVersion())
.put("gapic", Version.VERSION)
- .build()),
- // Add OpenCensus Metrics
- MetricsTracerFactory.create(tagger, stats, attributes),
- BuiltinMetricsTracerFactory.create(builtinAttributes),
- // Add user configured tracer
- settings.getTracerFactory()));
+ .build()))
+ // Add OpenCensus Metrics
+ .add(MetricsTracerFactory.create(tagger, stats, attributes))
+ // Add user configured tracer
+ .add(settings.getTracerFactory());
+ BuiltinMetricsTracerFactory builtinMetricsTracerFactory =
+ openTelemetry != null
+ ? BuiltinMetricsTracerFactory.create(openTelemetry, createBuiltinAttributes(settings))
+ : null;
+ if (builtinMetricsTracerFactory != null) {
+ tracerFactories.add(builtinMetricsTracerFactory);
+ }
+ return new CompositeTracerFactory(tracerFactories.build());
+ }
+
+ @Nullable
+ public static OpenTelemetry getOpenTelemetry(
+ String projectId, MetricsProvider metricsProvider, @Nullable Credentials defaultCredentials)
+ throws IOException {
+ if (metricsProvider instanceof CustomOpenTelemetryMetricsProvider) {
+ CustomOpenTelemetryMetricsProvider customMetricsProvider =
+ (CustomOpenTelemetryMetricsProvider) metricsProvider;
+ return customMetricsProvider.getOpenTelemetry();
+ } else if (metricsProvider instanceof DefaultMetricsProvider) {
+ Credentials credentials =
+ BigtableDataSettings.getMetricsCredentials() != null
+ ? BigtableDataSettings.getMetricsCredentials()
+ : defaultCredentials;
+ DefaultMetricsProvider defaultMetricsProvider = (DefaultMetricsProvider) metricsProvider;
+ return defaultMetricsProvider.getOpenTelemetry(projectId, credentials);
+ } else if (metricsProvider instanceof NoopMetricsProvider) {
+ return null;
+ }
+ throw new IOException("Invalid MetricsProvider type " + metricsProvider);
}
- private static ImmutableMap createBuiltinAttributes(
- EnhancedBigtableStubSettings.Builder builder) {
- return ImmutableMap.builder()
- .put("project_id", builder.getProjectId())
- .put("instance", builder.getInstanceId())
- .put("app_profile", builder.getAppProfileId())
- .put("client_name", "bigtable-java/" + Version.VERSION)
- .build();
+ private static Attributes createBuiltinAttributes(EnhancedBigtableStubSettings settings) {
+ return Attributes.of(
+ BIGTABLE_PROJECT_ID_KEY,
+ settings.getProjectId(),
+ INSTANCE_ID_KEY,
+ settings.getInstanceId(),
+ APP_PROFILE_KEY,
+ settings.getAppProfileId(),
+ CLIENT_NAME_KEY,
+ "bigtable-java/" + Version.VERSION);
}
private static void patchCredentials(EnhancedBigtableStubSettings.Builder settings)
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java
index 9a5027c740..f07a8fb7fc 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java
@@ -44,6 +44,8 @@
import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow;
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
+import com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider;
+import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider;
import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor;
import com.google.cloud.bigtable.data.v2.stub.readrows.ReadRowsBatchingDescriptor;
import com.google.common.base.MoreObjects;
@@ -229,6 +231,8 @@ public class EnhancedBigtableStubSettings extends StubSettings getJwtAudienceMapping() {
return jwtAudienceMapping;
}
+ public MetricsProvider getMetricsProvider() {
+ return metricsProvider;
+ }
+
/**
* Gets if routing cookie is enabled. If true, client will retry a request with extra metadata
* server sent back.
@@ -636,6 +645,8 @@ public static class Builder extends StubSettings.Builder jwtAudienceMapping) {
return this;
}
+ /**
+ * Sets the {@link MetricsProvider}.
+ *
+ * <p>By default, this is set to {@link
+ * com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider#INSTANCE} which will
+ * collect and export client side metrics.
+ *
+ * <p>To disable client side metrics, set it to {@link
+ * com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider#INSTANCE}.
+ *
+ * <p>To use a custom OpenTelemetry instance, refer to {@link
+ * com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider} on how to
+ * set it up.
+ */
+ public Builder setMetricsProvider(MetricsProvider metricsProvider) {
+ this.metricsProvider = Preconditions.checkNotNull(metricsProvider);
+ return this;
+ }
+
+ /** Gets the {@link MetricsProvider}. */
+ public MetricsProvider getMetricsProvider() {
+ return this.metricsProvider;
+ }
+
@InternalApi("Used for internal testing")
public Map getJwtAudienceMapping() {
return jwtAudienceMapping;
@@ -1028,6 +1067,11 @@ public EnhancedBigtableStubSettings build() {
featureFlags.setRoutingCookie(this.getEnableRoutingCookie());
featureFlags.setRetryInfo(this.getEnableRetryInfo());
+      // client_side_metrics_enabled feature flag is only set when a user is running with a
+ // DefaultMetricsProvider. This may cause false negatives when a user registered the
+ // metrics on their CustomOpenTelemetryMetricsProvider.
+ featureFlags.setClientSideMetricsEnabled(
+ this.getMetricsProvider() instanceof DefaultMetricsProvider);
// Serialize the web64 encode the bigtable feature flags
ByteArrayOutputStream boas = new ByteArrayOutputStream();
@@ -1080,6 +1124,7 @@ public String toString() {
generateInitialChangeStreamPartitionsSettings)
.add("readChangeStreamSettings", readChangeStreamSettings)
.add("pingAndWarmSettings", pingAndWarmSettings)
+ .add("metricsProvider", metricsProvider)
.add("parent", super.toString())
.toString();
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java
index 6208fce89e..97cc2f73ec 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java
@@ -86,7 +86,7 @@ public void call(
stopwatch.stop();
if (context.getTracer() instanceof BigtableTracer) {
((BigtableTracer) context.getTracer())
- .batchRequestThrottled(stopwatch.elapsed(TimeUnit.MILLISECONDS));
+ .batchRequestThrottled(stopwatch.elapsed(TimeUnit.NANOSECONDS));
}
RateLimitingResponseObserver innerObserver =
new RateLimitingResponseObserver(limiter, lastQpsChangeTime, responseObserver);
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java
new file mode 100644
index 0000000000..d3f88b88c2
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java
@@ -0,0 +1,356 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.FIRST_RESPONSE_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME;
+
+import com.google.api.MonitoredResource;
+import com.google.api.core.ApiFuture;
+import com.google.api.core.ApiFutureCallback;
+import com.google.api.core.ApiFutures;
+import com.google.api.core.InternalApi;
+import com.google.api.gax.core.CredentialsProvider;
+import com.google.api.gax.core.FixedCredentialsProvider;
+import com.google.api.gax.core.NoCredentialsProvider;
+import com.google.auth.Credentials;
+import com.google.cloud.monitoring.v3.MetricServiceClient;
+import com.google.cloud.monitoring.v3.MetricServiceSettings;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.monitoring.v3.CreateTimeSeriesRequest;
+import com.google.monitoring.v3.ProjectName;
+import com.google.monitoring.v3.TimeSeries;
+import com.google.protobuf.Empty;
+import io.opentelemetry.sdk.common.CompletableResultCode;
+import io.opentelemetry.sdk.metrics.InstrumentType;
+import io.opentelemetry.sdk.metrics.data.AggregationTemporality;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.export.MetricExporter;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+import javax.annotation.Nullable;
+import org.threeten.bp.Duration;
+
+/**
+ * Bigtable Cloud Monitoring OpenTelemetry Exporter.
+ *
+ * <p>The exporter will look for all bigtable owned metrics under bigtable.googleapis.com
+ * instrumentation scope and upload them via the Google Cloud Monitoring API.
+ */
+@InternalApi
+public final class BigtableCloudMonitoringExporter implements MetricExporter {
+
+ private static final Logger logger =
+ Logger.getLogger(BigtableCloudMonitoringExporter.class.getName());
+
+ // This system property can be used to override the monitoring endpoint
+ // to a different environment. It's meant for internal testing only.
+ private static final String MONITORING_ENDPOINT =
+ MoreObjects.firstNonNull(
+ System.getProperty("bigtable.test-monitoring-endpoint"),
+ MetricServiceSettings.getDefaultEndpoint());
+
+  private static final String APPLICATION_RESOURCE_PROJECT_ID = "project_id";
+
+ private final MetricServiceClient client;
+
+ private final String bigtableProjectId;
+ private final String taskId;
+
+ // The resource the client application is running on
+ private final MonitoredResource applicationResource;
+
+ private final AtomicBoolean isShutdown = new AtomicBoolean(false);
+
+ private CompletableResultCode lastExportCode;
+
+ private static final ImmutableList BIGTABLE_TABLE_METRICS =
+ ImmutableSet.of(
+ OPERATION_LATENCIES_NAME,
+ ATTEMPT_LATENCIES_NAME,
+ SERVER_LATENCIES_NAME,
+ FIRST_RESPONSE_LATENCIES_NAME,
+ CLIENT_BLOCKING_LATENCIES_NAME,
+ APPLICATION_BLOCKING_LATENCIES_NAME,
+ RETRY_COUNT_NAME,
+ CONNECTIVITY_ERROR_COUNT_NAME)
+ .stream()
+ .map(m -> METER_NAME + m)
+ .collect(ImmutableList.toImmutableList());
+
+ private static final ImmutableList APPLICATION_METRICS =
+ ImmutableSet.of(PER_CONNECTION_ERROR_COUNT_NAME).stream()
+ .map(m -> METER_NAME + m)
+ .collect(ImmutableList.toImmutableList());
+
+ public static BigtableCloudMonitoringExporter create(
+ String projectId, @Nullable Credentials credentials) throws IOException {
+ MetricServiceSettings.Builder settingsBuilder = MetricServiceSettings.newBuilder();
+ CredentialsProvider credentialsProvider =
+ Optional.ofNullable(credentials)
+ .map(FixedCredentialsProvider::create)
+ .orElse(NoCredentialsProvider.create());
+ settingsBuilder.setCredentialsProvider(credentialsProvider);
+ settingsBuilder.setEndpoint(MONITORING_ENDPOINT);
+
+ org.threeten.bp.Duration timeout = Duration.ofMinutes(1);
+ // TODO: createServiceTimeSeries needs special handling if the request failed. Leaving
+ // it as not retried for now.
+ settingsBuilder.createServiceTimeSeriesSettings().setSimpleTimeoutNoRetries(timeout);
+
+ // Detect the resource that the client application is running on. For example,
+ // this could be a GCE instance or a GKE pod. Currently, we only support GCE instance and
+ // GKE pod. This method will return null for everything else.
+ MonitoredResource applicationResource = BigtableExporterUtils.detectResource();
+
+ return new BigtableCloudMonitoringExporter(
+ projectId,
+ MetricServiceClient.create(settingsBuilder.build()),
+ applicationResource,
+ BigtableExporterUtils.getDefaultTaskValue());
+ }
+
+ @VisibleForTesting
+ BigtableCloudMonitoringExporter(
+ String projectId,
+ MetricServiceClient client,
+ @Nullable MonitoredResource applicationResource,
+ String taskId) {
+ this.client = client;
+ this.taskId = taskId;
+ this.applicationResource = applicationResource;
+ this.bigtableProjectId = projectId;
+ }
+
+ @Override
+ public CompletableResultCode export(Collection collection) {
+ if (isShutdown.get()) {
+ logger.log(Level.WARNING, "Exporter is shutting down");
+ return CompletableResultCode.ofFailure();
+ }
+
+ CompletableResultCode bigtableExportCode = exportBigtableResourceMetrics(collection);
+ CompletableResultCode applicationExportCode = exportApplicationResourceMetrics(collection);
+
+ lastExportCode =
+ CompletableResultCode.ofAll(ImmutableList.of(applicationExportCode, bigtableExportCode));
+
+ return lastExportCode;
+ }
+
+ /** Export metrics associated with a BigtableTable resource. */
+ private CompletableResultCode exportBigtableResourceMetrics(Collection collection) {
+ // Filter bigtable table metrics
+ List bigtableMetricData =
+ collection.stream()
+ .filter(md -> BIGTABLE_TABLE_METRICS.contains(md.getName()))
+ .collect(Collectors.toList());
+
+ // Skips exporting if there's none
+ if (bigtableMetricData.isEmpty()) {
+ return CompletableResultCode.ofSuccess();
+ }
+
+ // Verifies metrics project id are the same as the bigtable project id set on this client
+ if (!bigtableMetricData.stream()
+ .flatMap(metricData -> metricData.getData().getPoints().stream())
+ .allMatch(pd -> bigtableProjectId.equals(BigtableExporterUtils.getProjectId(pd)))) {
+      logger.log(Level.WARNING, "Metric data has a different projectId. Skip exporting.");
+ return CompletableResultCode.ofFailure();
+ }
+
+ List bigtableTimeSeries;
+ try {
+ bigtableTimeSeries =
+ BigtableExporterUtils.convertToBigtableTimeSeries(bigtableMetricData, taskId);
+ } catch (Throwable e) {
+ logger.log(
+ Level.WARNING,
+ "Failed to convert bigtable table metric data to cloud monitoring timeseries.",
+ e);
+ return CompletableResultCode.ofFailure();
+ }
+
+ ProjectName projectName = ProjectName.of(bigtableProjectId);
+ CreateTimeSeriesRequest bigtableRequest =
+ CreateTimeSeriesRequest.newBuilder()
+ .setName(projectName.toString())
+ .addAllTimeSeries(bigtableTimeSeries)
+ .build();
+
+ ApiFuture<Empty> future =
+ this.client.createServiceTimeSeriesCallable().futureCall(bigtableRequest);
+
+ CompletableResultCode bigtableExportCode = new CompletableResultCode();
+ ApiFutures.addCallback(
+ future,
+ new ApiFutureCallback<Empty>() {
+ @Override
+ public void onFailure(Throwable throwable) {
+ logger.log(
+ Level.WARNING,
+ "createServiceTimeSeries request failed for bigtable metrics. ",
+ throwable);
+ bigtableExportCode.fail();
+ }
+
+ @Override
+ public void onSuccess(Empty empty) {
+ bigtableExportCode.succeed();
+ }
+ },
+ MoreExecutors.directExecutor());
+
+ return bigtableExportCode;
+ }
+
+ /** Export metrics associated with the resource the Application is running on. */
+ private CompletableResultCode exportApplicationResourceMetrics(
+ Collection<MetricData> collection) {
+ if (applicationResource == null) {
+ return CompletableResultCode.ofSuccess();
+ }
+
+ // Filter application level metrics
+ List<MetricData> metricData =
+ collection.stream()
+ .filter(md -> APPLICATION_METRICS.contains(md.getName()))
+ .collect(Collectors.toList());
+
+ // Skip exporting if there's none
+ if (metricData.isEmpty()) {
+ return CompletableResultCode.ofSuccess();
+ }
+
+ List<TimeSeries> timeSeries;
+ try {
+ timeSeries =
+ BigtableExporterUtils.convertToApplicationResourceTimeSeries(
+ metricData, taskId, applicationResource);
+ } catch (Throwable e) {
+ logger.log(
+ Level.WARNING,
+ "Failed to convert application metric data to cloud monitoring timeseries.",
+ e);
+ return CompletableResultCode.ofFailure();
+ }
+
+ // Construct the request. The project id will be the project id of the detected monitored
+ // resource.
+ ApiFuture<Empty> gceOrGkeFuture;
+ CompletableResultCode exportCode = new CompletableResultCode();
+ try {
+ ProjectName projectName =
+ ProjectName.of(applicationResource.getLabelsOrThrow(APPLICATION_RESOURCE_PROJECT_ID));
+ CreateTimeSeriesRequest request =
+ CreateTimeSeriesRequest.newBuilder()
+ .setName(projectName.toString())
+ .addAllTimeSeries(timeSeries)
+ .build();
+
+ gceOrGkeFuture = this.client.createServiceTimeSeriesCallable().futureCall(request);
+
+ ApiFutures.addCallback(
+ gceOrGkeFuture,
+ new ApiFutureCallback<Empty>() {
+ @Override
+ public void onFailure(Throwable throwable) {
+ logger.log(
+ Level.WARNING,
+ "createServiceTimeSeries request failed for per connection error metrics.",
+ throwable);
+ exportCode.fail();
+ }
+
+ @Override
+ public void onSuccess(Empty empty) {
+ exportCode.succeed();
+ }
+ },
+ MoreExecutors.directExecutor());
+
+ } catch (Exception e) {
+ logger.log(
+ Level.WARNING,
+ "Failed to get projectName for application resource " + applicationResource);
+ return CompletableResultCode.ofFailure();
+ }
+
+ return exportCode;
+ }
+
+ @Override
+ public CompletableResultCode flush() {
+ if (lastExportCode != null) {
+ return lastExportCode;
+ }
+ return CompletableResultCode.ofSuccess();
+ }
+
+ @Override
+ public CompletableResultCode shutdown() {
+ if (!isShutdown.compareAndSet(false, true)) {
+ logger.log(Level.WARNING, "shutdown is called multiple times");
+ return CompletableResultCode.ofSuccess();
+ }
+ CompletableResultCode flushResult = flush();
+ CompletableResultCode shutdownResult = new CompletableResultCode();
+ flushResult.whenComplete(
+ () -> {
+ Throwable throwable = null;
+ try {
+ client.shutdown();
+ } catch (Throwable e) {
+ logger.log(Level.WARNING, "failed to shutdown the monitoring client", e);
+ throwable = e;
+ }
+ if (throwable != null) {
+ shutdownResult.fail();
+ } else {
+ shutdownResult.succeed();
+ }
+ });
+ return CompletableResultCode.ofAll(Arrays.asList(flushResult, shutdownResult));
+ }
+
+ /**
+ * For Google Cloud Monitoring always return CUMULATIVE to keep track of the cumulative value of a
+ * metric over time.
+ */
+ @Override
+ public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) {
+ return AggregationTemporality.CUMULATIVE;
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java
new file mode 100644
index 0000000000..9a4d928ce4
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java
@@ -0,0 +1,347 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import static com.google.api.Distribution.BucketOptions;
+import static com.google.api.Distribution.BucketOptions.Explicit;
+import static com.google.api.MetricDescriptor.MetricKind;
+import static com.google.api.MetricDescriptor.MetricKind.CUMULATIVE;
+import static com.google.api.MetricDescriptor.MetricKind.GAUGE;
+import static com.google.api.MetricDescriptor.MetricKind.UNRECOGNIZED;
+import static com.google.api.MetricDescriptor.ValueType;
+import static com.google.api.MetricDescriptor.ValueType.DISTRIBUTION;
+import static com.google.api.MetricDescriptor.ValueType.DOUBLE;
+import static com.google.api.MetricDescriptor.ValueType.INT64;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_UID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY;
+
+import com.google.api.Distribution;
+import com.google.api.Metric;
+import com.google.api.MonitoredResource;
+import com.google.cloud.opentelemetry.detection.AttributeKeys;
+import com.google.cloud.opentelemetry.detection.DetectedPlatform;
+import com.google.cloud.opentelemetry.detection.GCPPlatformDetector;
+import com.google.common.base.MoreObjects;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+import com.google.monitoring.v3.Point;
+import com.google.monitoring.v3.TimeInterval;
+import com.google.monitoring.v3.TimeSeries;
+import com.google.monitoring.v3.TypedValue;
+import com.google.protobuf.util.Timestamps;
+import io.opentelemetry.api.common.AttributeKey;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.metrics.data.AggregationTemporality;
+import io.opentelemetry.sdk.metrics.data.DoublePointData;
+import io.opentelemetry.sdk.metrics.data.HistogramData;
+import io.opentelemetry.sdk.metrics.data.HistogramPointData;
+import io.opentelemetry.sdk.metrics.data.LongPointData;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.data.MetricDataType;
+import io.opentelemetry.sdk.metrics.data.PointData;
+import io.opentelemetry.sdk.metrics.data.SumData;
+import java.lang.management.ManagementFactory;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import javax.annotation.Nullable;
+
+/** Utils to convert OpenTelemetry types to Google Cloud Monitoring types. */
+class BigtableExporterUtils {
+
+ private static final Logger logger = Logger.getLogger(BigtableExporterUtils.class.getName());
+
+ private static final String BIGTABLE_RESOURCE_TYPE = "bigtable_client_raw";
+
+ // These metric labels will be promoted to the bigtable_table monitored resource fields
+ private static final Set<AttributeKey<String>> BIGTABLE_PROMOTED_RESOURCE_LABELS =
+ ImmutableSet.of(
+ BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, TABLE_ID_KEY, CLUSTER_ID_KEY, ZONE_ID_KEY);
+
+ private BigtableExporterUtils() {}
+
+ /**
+ * In most cases this should look like java-${UUID}@${hostname}. The hostname will be retrieved
+ * from the jvm name and fallback to the local hostname.
+ */
+ static String getDefaultTaskValue() {
+ // Something like '<pid>@<hostname>'
+ final String jvmName = ManagementFactory.getRuntimeMXBean().getName();
+ // If jvm doesn't have the expected format, fallback to the local hostname
+ if (jvmName.indexOf('@') < 1) {
+ String hostname = "localhost";
+ try {
+ hostname = InetAddress.getLocalHost().getHostName();
+ } catch (UnknownHostException e) {
+ logger.log(Level.INFO, "Unable to get the hostname.", e);
+ }
+ // Generate a random number and use the same format "random_number@hostname".
+ return "java-" + UUID.randomUUID() + "@" + hostname;
+ }
+ return "java-" + UUID.randomUUID() + jvmName;
+ }
+
+ static String getProjectId(PointData pointData) {
+ return pointData.getAttributes().get(BIGTABLE_PROJECT_ID_KEY);
+ }
+
+ static List<TimeSeries> convertToBigtableTimeSeries(List<MetricData> collection, String taskId) {
+ List<TimeSeries> allTimeSeries = new ArrayList<>();
+
+ for (MetricData metricData : collection) {
+ if (!metricData.getInstrumentationScopeInfo().getName().equals(METER_NAME)) {
+ // Filter out metric data for instruments that are not part of the bigtable builtin metrics
+ continue;
+ }
+ metricData.getData().getPoints().stream()
+ .map(pointData -> convertPointToBigtableTimeSeries(metricData, pointData, taskId))
+ .forEach(allTimeSeries::add);
+ }
+
+ return allTimeSeries;
+ }
+
+ static List<TimeSeries> convertToApplicationResourceTimeSeries(
+ Collection<MetricData> collection, String taskId, MonitoredResource applicationResource) {
+ Preconditions.checkNotNull(
+ applicationResource,
+ "convert application metrics is called when the supported resource is not detected");
+ List<TimeSeries> allTimeSeries = new ArrayList<>();
+ for (MetricData metricData : collection) {
+ if (!metricData.getInstrumentationScopeInfo().getName().equals(METER_NAME)) {
+ // Filter out metric data for instruments that are not part of the bigtable builtin metrics
+ continue;
+ }
+ metricData.getData().getPoints().stream()
+ .map(
+ pointData ->
+ convertPointToApplicationResourceTimeSeries(
+ metricData, pointData, taskId, applicationResource))
+ .forEach(allTimeSeries::add);
+ }
+ return allTimeSeries;
+ }
+
+ @Nullable
+ static MonitoredResource detectResource() {
+ GCPPlatformDetector detector = GCPPlatformDetector.DEFAULT_INSTANCE;
+ DetectedPlatform detectedPlatform = detector.detectPlatform();
+ switch (detectedPlatform.getSupportedPlatform()) {
+ case GOOGLE_COMPUTE_ENGINE:
+ return createGceMonitoredResource(
+ detectedPlatform.getProjectId(), detectedPlatform.getAttributes());
+ case GOOGLE_KUBERNETES_ENGINE:
+ return createGkeMonitoredResource(
+ detectedPlatform.getProjectId(), detectedPlatform.getAttributes());
+ default:
+ return null;
+ }
+ }
+
+ private static MonitoredResource createGceMonitoredResource(
+ String projectId, Map<String, String> attributes) {
+ return MonitoredResource.newBuilder()
+ .setType("gce_instance")
+ .putLabels("project_id", projectId)
+ .putLabels("instance_id", attributes.get(AttributeKeys.GCE_INSTANCE_ID))
+ .putLabels("zone", attributes.get(AttributeKeys.GCE_AVAILABILITY_ZONE))
+ .build();
+ }
+
+ private static MonitoredResource createGkeMonitoredResource(
+ String projectId, Map<String, String> attributes) {
+ return MonitoredResource.newBuilder()
+ .setType("k8s_container")
+ .putLabels("project_id", projectId)
+ .putLabels("location", attributes.get(AttributeKeys.GKE_CLUSTER_LOCATION))
+ .putLabels("cluster_name", attributes.get(AttributeKeys.GKE_CLUSTER_NAME))
+ .putLabels("namespace_name", MoreObjects.firstNonNull(System.getenv("NAMESPACE"), ""))
+ .putLabels("pod_name", MoreObjects.firstNonNull(System.getenv("HOSTNAME"), ""))
+ .putLabels("container_name", MoreObjects.firstNonNull(System.getenv("CONTAINER_NAME"), ""))
+ .build();
+ }
+
+ private static TimeSeries convertPointToBigtableTimeSeries(
+ MetricData metricData, PointData pointData, String taskId) {
+ TimeSeries.Builder builder =
+ TimeSeries.newBuilder()
+ .setMetricKind(convertMetricKind(metricData))
+ .setValueType(convertValueType(metricData.getType()));
+ Metric.Builder metricBuilder = Metric.newBuilder().setType(metricData.getName());
+
+ Attributes attributes = pointData.getAttributes();
+ MonitoredResource.Builder monitoredResourceBuilder =
+ MonitoredResource.newBuilder().setType(BIGTABLE_RESOURCE_TYPE);
+
+ for (AttributeKey<?> key : attributes.asMap().keySet()) {
+ if (BIGTABLE_PROMOTED_RESOURCE_LABELS.contains(key)) {
+ monitoredResourceBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key)));
+ } else {
+ metricBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key)));
+ }
+ }
+
+ builder.setResource(monitoredResourceBuilder.build());
+
+ metricBuilder.putLabels(CLIENT_UID_KEY.getKey(), taskId);
+ builder.setMetric(metricBuilder.build());
+
+ TimeInterval timeInterval =
+ TimeInterval.newBuilder()
+ .setStartTime(Timestamps.fromNanos(pointData.getStartEpochNanos()))
+ .setEndTime(Timestamps.fromNanos(pointData.getEpochNanos()))
+ .build();
+
+ builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval));
+
+ return builder.build();
+ }
+
+ private static TimeSeries convertPointToApplicationResourceTimeSeries(
+ MetricData metricData,
+ PointData pointData,
+ String taskId,
+ MonitoredResource applicationResource) {
+ TimeSeries.Builder builder =
+ TimeSeries.newBuilder()
+ .setMetricKind(convertMetricKind(metricData))
+ .setValueType(convertValueType(metricData.getType()))
+ .setResource(applicationResource);
+
+ Metric.Builder metricBuilder = Metric.newBuilder().setType(metricData.getName());
+
+ Attributes attributes = pointData.getAttributes();
+ for (AttributeKey<?> key : attributes.asMap().keySet()) {
+ metricBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key)));
+ }
+
+ metricBuilder.putLabels(CLIENT_UID_KEY.getKey(), taskId);
+ builder.setMetric(metricBuilder.build());
+
+ TimeInterval timeInterval =
+ TimeInterval.newBuilder()
+ .setStartTime(Timestamps.fromNanos(pointData.getStartEpochNanos()))
+ .setEndTime(Timestamps.fromNanos(pointData.getEpochNanos()))
+ .build();
+
+ builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval));
+ return builder.build();
+ }
+
+ private static MetricKind convertMetricKind(MetricData metricData) {
+ switch (metricData.getType()) {
+ case HISTOGRAM:
+ case EXPONENTIAL_HISTOGRAM:
+ return convertHistogramType(metricData.getHistogramData());
+ case LONG_GAUGE:
+ case DOUBLE_GAUGE:
+ return GAUGE;
+ case LONG_SUM:
+ return convertSumDataType(metricData.getLongSumData());
+ case DOUBLE_SUM:
+ return convertSumDataType(metricData.getDoubleSumData());
+ default:
+ return UNRECOGNIZED;
+ }
+ }
+
+ private static MetricKind convertHistogramType(HistogramData histogramData) {
+ if (histogramData.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) {
+ return CUMULATIVE;
+ }
+ return UNRECOGNIZED;
+ }
+
+ private static MetricKind convertSumDataType(SumData<?> sum) {
+ if (!sum.isMonotonic()) {
+ return GAUGE;
+ }
+ if (sum.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) {
+ return CUMULATIVE;
+ }
+ return UNRECOGNIZED;
+ }
+
+ private static ValueType convertValueType(MetricDataType metricDataType) {
+ switch (metricDataType) {
+ case LONG_GAUGE:
+ case LONG_SUM:
+ return INT64;
+ case DOUBLE_GAUGE:
+ case DOUBLE_SUM:
+ return DOUBLE;
+ case HISTOGRAM:
+ case EXPONENTIAL_HISTOGRAM:
+ return DISTRIBUTION;
+ default:
+ return ValueType.UNRECOGNIZED;
+ }
+ }
+
+ private static Point createPoint(
+ MetricDataType type, PointData pointData, TimeInterval timeInterval) {
+ Point.Builder builder = Point.newBuilder().setInterval(timeInterval);
+ switch (type) {
+ case HISTOGRAM:
+ case EXPONENTIAL_HISTOGRAM:
+ return builder
+ .setValue(
+ TypedValue.newBuilder()
+ .setDistributionValue(convertHistogramData((HistogramPointData) pointData))
+ .build())
+ .build();
+ case DOUBLE_GAUGE:
+ case DOUBLE_SUM:
+ return builder
+ .setValue(
+ TypedValue.newBuilder()
+ .setDoubleValue(((DoublePointData) pointData).getValue())
+ .build())
+ .build();
+ case LONG_GAUGE:
+ case LONG_SUM:
+ return builder
+ .setValue(TypedValue.newBuilder().setInt64Value(((LongPointData) pointData).getValue()))
+ .build();
+ default:
+ logger.log(Level.WARNING, "unsupported metric type");
+ return builder.build();
+ }
+ }
+
+ private static Distribution convertHistogramData(HistogramPointData pointData) {
+ return Distribution.newBuilder()
+ .setCount(pointData.getCount())
+ .setMean(pointData.getCount() == 0L ? 0.0D : pointData.getSum() / pointData.getCount())
+ .setBucketOptions(
+ BucketOptions.newBuilder()
+ .setExplicitBuckets(Explicit.newBuilder().addAllBounds(pointData.getBoundaries())))
+ .addAllBucketCounts(pointData.getCounts())
+ .build();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java
index 1cda49934c..3b2242385a 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java
@@ -42,7 +42,7 @@ public void streamCreated(Attributes transportAttrs, Metadata headers) {
@Override
public void outboundMessageSent(int seqNo, long optionalWireSize, long optionalUncompressedSize) {
- tracer.grpcChannelQueuedLatencies(stopwatch.elapsed(TimeUnit.MILLISECONDS));
+ tracer.grpcChannelQueuedLatencies(stopwatch.elapsed(TimeUnit.NANOSECONDS));
}
static class Factory extends ClientStreamTracer.Factory {
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java
new file mode 100644
index 0000000000..d85300828b
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.api.core.InternalApi;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import io.opentelemetry.api.common.AttributeKey;
+import io.opentelemetry.sdk.metrics.Aggregation;
+import io.opentelemetry.sdk.metrics.InstrumentSelector;
+import io.opentelemetry.sdk.metrics.InstrumentType;
+import io.opentelemetry.sdk.metrics.View;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/** Defining Bigtable built-in metrics scope, attributes, metric names and views. */
+@InternalApi
+public class BuiltinMetricsConstants {
+
+ // Metric attribute keys for monitored resource
+ public static final AttributeKey<String> BIGTABLE_PROJECT_ID_KEY =
+ AttributeKey.stringKey("project_id");
+ public static final AttributeKey<String> INSTANCE_ID_KEY = AttributeKey.stringKey("instance");
+ public static final AttributeKey<String> TABLE_ID_KEY = AttributeKey.stringKey("table");
+ public static final AttributeKey<String> CLUSTER_ID_KEY = AttributeKey.stringKey("cluster");
+ public static final AttributeKey<String> ZONE_ID_KEY = AttributeKey.stringKey("zone");
+
+ // Metric attribute keys for labels
+ // We need to access APP_PROFILE_KEY in EnhancedBigtableStubSettings and STREAMING_KEY in
+ // IT tests, so they're public.
+ public static final AttributeKey<String> APP_PROFILE_KEY = AttributeKey.stringKey("app_profile");
+ public static final AttributeKey<Boolean> STREAMING_KEY = AttributeKey.booleanKey("streaming");
+ public static final AttributeKey<String> CLIENT_NAME_KEY = AttributeKey.stringKey("client_name");
+ static final AttributeKey<String> METHOD_KEY = AttributeKey.stringKey("method");
+ static final AttributeKey<String> STATUS_KEY = AttributeKey.stringKey("status");
+ static final AttributeKey<String> CLIENT_UID_KEY = AttributeKey.stringKey("client_uid");
+
+ // Metric names
+ public static final String OPERATION_LATENCIES_NAME = "operation_latencies";
+ public static final String ATTEMPT_LATENCIES_NAME = "attempt_latencies";
+ static final String RETRY_COUNT_NAME = "retry_count";
+ static final String CONNECTIVITY_ERROR_COUNT_NAME = "connectivity_error_count";
+ static final String SERVER_LATENCIES_NAME = "server_latencies";
+ static final String FIRST_RESPONSE_LATENCIES_NAME = "first_response_latencies";
+ static final String APPLICATION_BLOCKING_LATENCIES_NAME = "application_latencies";
+ static final String CLIENT_BLOCKING_LATENCIES_NAME = "throttling_latencies";
+ static final String PER_CONNECTION_ERROR_COUNT_NAME = "per_connection_error_count";
+
+ // Buckets under 100,000 are identical to buckets for server side metrics handler_latencies.
+ // Extending client side bucket to up to 3,200,000.
+ private static final Aggregation AGGREGATION_WITH_MILLIS_HISTOGRAM =
+ Aggregation.explicitBucketHistogram(
+ ImmutableList.of(
+ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0, 30.0, 40.0,
+ 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, 250.0, 300.0, 400.0, 500.0, 650.0,
+ 800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, 50000.0, 100000.0, 200000.0,
+ 400000.0, 800000.0, 1600000.0, 3200000.0)); // max is 53.3 minutes
+
+ private static final Aggregation AGGREGATION_PER_CONNECTION_ERROR_COUNT_HISTOGRAM =
+ Aggregation.explicitBucketHistogram(
+ ImmutableList.of(
+ 1.0,
+ 2.0,
+ 4.0,
+ 8.0,
+ 16.0,
+ 32.0,
+ 64.0,
+ 125.0,
+ 250.0,
+ 500.0,
+ 1_000.0,
+ 2_000.0,
+ 4_000.0,
+ 8_000.0,
+ 16_000.0,
+ 32_000.0,
+ 64_000.0,
+ 128_000.0,
+ 250_000.0,
+ 500_000.0,
+ 1_000_000.0));
+
+ public static final String METER_NAME = "bigtable.googleapis.com/internal/client/";
+
+ static final Set<AttributeKey> COMMON_ATTRIBUTES =
+ ImmutableSet.of(
+ BIGTABLE_PROJECT_ID_KEY,
+ INSTANCE_ID_KEY,
+ TABLE_ID_KEY,
+ APP_PROFILE_KEY,
+ CLUSTER_ID_KEY,
+ ZONE_ID_KEY,
+ METHOD_KEY,
+ CLIENT_NAME_KEY);
+
+ static void defineView(
+ ImmutableMap.Builder<InstrumentSelector, View> viewMap,
+ String id,
+ Aggregation aggregation,
+ InstrumentType type,
+ String unit,
+ Set<AttributeKey> attributes) {
+ InstrumentSelector selector =
+ InstrumentSelector.builder()
+ .setName(id)
+ .setMeterName(METER_NAME)
+ .setType(type)
+ .setUnit(unit)
+ .build();
+ Set<String> attributesFilter =
+ ImmutableSet.<String>builder()
+ .addAll(
+ COMMON_ATTRIBUTES.stream().map(AttributeKey::getKey).collect(Collectors.toSet()))
+ .addAll(attributes.stream().map(AttributeKey::getKey).collect(Collectors.toSet()))
+ .build();
+ View view =
+ View.builder()
+ .setName(METER_NAME + id)
+ .setAggregation(aggregation)
+ .setAttributeFilter(attributesFilter)
+ .build();
+
+ viewMap.put(selector, view);
+ }
+
+ public static Map<InstrumentSelector, View> getAllViews() {
+ ImmutableMap.Builder<InstrumentSelector, View> views = ImmutableMap.builder();
+
+ defineView(
+ views,
+ OPERATION_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.<AttributeKey>builder()
+ .addAll(COMMON_ATTRIBUTES)
+ .add(STREAMING_KEY, STATUS_KEY)
+ .build());
+ defineView(
+ views,
+ ATTEMPT_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.<AttributeKey>builder()
+ .addAll(COMMON_ATTRIBUTES)
+ .add(STREAMING_KEY, STATUS_KEY)
+ .build());
+ defineView(
+ views,
+ SERVER_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.<AttributeKey>builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build());
+ defineView(
+ views,
+ FIRST_RESPONSE_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.<AttributeKey>builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build());
+ defineView(
+ views,
+ APPLICATION_BLOCKING_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.<AttributeKey>builder().addAll(COMMON_ATTRIBUTES).build());
+ defineView(
+ views,
+ CLIENT_BLOCKING_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.<AttributeKey>builder().addAll(COMMON_ATTRIBUTES).build());
+ defineView(
+ views,
+ RETRY_COUNT_NAME,
+ Aggregation.sum(),
+ InstrumentType.COUNTER,
+ "1",
+ ImmutableSet.<AttributeKey>builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build());
+ defineView(
+ views,
+ CONNECTIVITY_ERROR_COUNT_NAME,
+ Aggregation.sum(),
+ InstrumentType.COUNTER,
+ "1",
+ ImmutableSet.<AttributeKey>builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build());
+
+ defineView(
+ views,
+ PER_CONNECTION_ERROR_COUNT_NAME,
+ AGGREGATION_PER_CONNECTION_ERROR_COUNT_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "1",
+ ImmutableSet.<AttributeKey>builder()
+ .add(BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, APP_PROFILE_KEY, CLIENT_NAME_KEY)
+ .build());
+
+ return views.build();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java
index 2d8262a93e..abd214d760 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java
@@ -16,13 +16,22 @@
package com.google.cloud.bigtable.data.v2.stub.metrics;
import static com.google.api.gax.tracing.ApiTracerFactory.OperationType;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METHOD_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STATUS_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STREAMING_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY;
import com.google.api.gax.retrying.ServerStreamingAttemptException;
import com.google.api.gax.tracing.SpanName;
-import com.google.cloud.bigtable.stats.StatsRecorderWrapper;
-import com.google.common.annotations.VisibleForTesting;
+import com.google.cloud.bigtable.Version;
import com.google.common.base.Stopwatch;
import com.google.common.math.IntMath;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.metrics.DoubleHistogram;
+import io.opentelemetry.api.metrics.LongCounter;
import java.util.concurrent.CancellationException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -37,8 +46,7 @@
*/
class BuiltinMetricsTracer extends BigtableTracer {
- private final StatsRecorderWrapper recorder;
-
+ private static final String NAME = "java-bigtable/" + Version.VERSION;
private final OperationType operationType;
private final SpanName spanName;
@@ -64,21 +72,56 @@ class BuiltinMetricsTracer extends BigtableTracer {
private boolean flowControlIsDisabled = false;
- private AtomicInteger requestLeft = new AtomicInteger(0);
+ private final AtomicInteger requestLeft = new AtomicInteger(0);
// Monitored resource labels
private String tableId = "unspecified";
private String zone = "global";
private String cluster = "unspecified";
- private AtomicLong totalClientBlockingTime = new AtomicLong(0);
+ private final AtomicLong totalClientBlockingTime = new AtomicLong(0);
+
+ private final Attributes baseAttributes;
+
+ private Long serverLatencies = null;
+
+ // OpenCensus (and server) histogram buckets use [start, end), however OpenTelemetry uses (start,
+ // end]. To work around this, we measure all the latencies in nanoseconds and convert them
+ // to milliseconds and use DoubleHistogram. This should minimize the chance of a data
+ // point fall on the bucket boundary that causes off by one errors.
+ private final DoubleHistogram operationLatenciesHistogram;
+ private final DoubleHistogram attemptLatenciesHistogram;
+ private final DoubleHistogram serverLatenciesHistogram;
+ private final DoubleHistogram firstResponseLatenciesHistogram;
+ private final DoubleHistogram clientBlockingLatenciesHistogram;
+ private final DoubleHistogram applicationBlockingLatenciesHistogram;
+ private final LongCounter connectivityErrorCounter;
+ private final LongCounter retryCounter;
- @VisibleForTesting
BuiltinMetricsTracer(
- OperationType operationType, SpanName spanName, StatsRecorderWrapper recorder) {
+ OperationType operationType,
+ SpanName spanName,
+ Attributes attributes,
+ DoubleHistogram operationLatenciesHistogram,
+ DoubleHistogram attemptLatenciesHistogram,
+ DoubleHistogram serverLatenciesHistogram,
+ DoubleHistogram firstResponseLatenciesHistogram,
+ DoubleHistogram clientBlockingLatenciesHistogram,
+ DoubleHistogram applicationBlockingLatenciesHistogram,
+ LongCounter connectivityErrorCounter,
+ LongCounter retryCounter) {
this.operationType = operationType;
this.spanName = spanName;
- this.recorder = recorder;
+ this.baseAttributes = attributes;
+
+ this.operationLatenciesHistogram = operationLatenciesHistogram;
+ this.attemptLatenciesHistogram = attemptLatenciesHistogram;
+ this.serverLatenciesHistogram = serverLatenciesHistogram;
+ this.firstResponseLatenciesHistogram = firstResponseLatenciesHistogram;
+ this.clientBlockingLatenciesHistogram = clientBlockingLatenciesHistogram;
+ this.applicationBlockingLatenciesHistogram = applicationBlockingLatenciesHistogram;
+ this.connectivityErrorCounter = connectivityErrorCounter;
+ this.retryCounter = retryCounter;
}
@Override
@@ -203,13 +246,8 @@ public int getAttempt() {
@Override
public void recordGfeMetadata(@Nullable Long latency, @Nullable Throwable throwable) {
- // Record the metrics and put in the map after the attempt is done, so we can have cluster and
- // zone information
if (latency != null) {
- recorder.putGfeLatencies(latency);
- recorder.putGfeMissingHeaders(0);
- } else {
- recorder.putGfeMissingHeaders(1);
+ serverLatencies = latency;
}
}
@@ -220,13 +258,13 @@ public void setLocations(String zone, String cluster) {
}
@Override
- public void batchRequestThrottled(long throttledTimeMs) {
- totalClientBlockingTime.addAndGet(throttledTimeMs);
+ public void batchRequestThrottled(long throttledTimeNanos) {
+ totalClientBlockingTime.addAndGet(Duration.ofNanos(throttledTimeNanos).toMillis());
}
@Override
- public void grpcChannelQueuedLatencies(long queuedTimeMs) {
- totalClientBlockingTime.addAndGet(queuedTimeMs);
+ public void grpcChannelQueuedLatencies(long queuedTimeNanos) {
+ totalClientBlockingTime.addAndGet(queuedTimeNanos);
}
@Override
@@ -239,26 +277,43 @@ private void recordOperationCompletion(@Nullable Throwable status) {
return;
}
operationTimer.stop();
- long operationLatency = operationTimer.elapsed(TimeUnit.MILLISECONDS);
+
+ boolean isStreaming = operationType == OperationType.ServerStreaming;
+ String statusStr = Util.extractStatus(status);
+
+ // Publish metric data with all the attributes. The attributes get filtered in
+ // BuiltinMetricsConstants when we construct the views.
+ Attributes attributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, tableId)
+ .put(CLUSTER_ID_KEY, cluster)
+ .put(ZONE_ID_KEY, zone)
+ .put(METHOD_KEY, spanName.toString())
+ .put(CLIENT_NAME_KEY, NAME)
+ .put(STREAMING_KEY, isStreaming)
+ .put(STATUS_KEY, statusStr)
+ .build();
+
long operationLatencyNano = operationTimer.elapsed(TimeUnit.NANOSECONDS);
// Only record when retry count is greater than 0 so the retry
// graph will be less confusing
if (attemptCount > 1) {
- recorder.putRetryCount(attemptCount - 1);
+ retryCounter.add(attemptCount - 1, attributes);
}
+ operationLatenciesHistogram.record(convertToMs(operationLatencyNano), attributes);
+
// serverLatencyTimer should already be stopped in recordAttemptCompletion
- recorder.putOperationLatencies(operationLatency);
- recorder.putApplicationLatencies(
- Duration.ofNanos(operationLatencyNano - totalServerLatencyNano.get()).toMillis());
+ long applicationLatencyNano = operationLatencyNano - totalServerLatencyNano.get();
+ applicationBlockingLatenciesHistogram.record(convertToMs(applicationLatencyNano), attributes);
if (operationType == OperationType.ServerStreaming
&& spanName.getMethodName().equals("ReadRows")) {
- recorder.putFirstResponseLatencies(firstResponsePerOpTimer.elapsed(TimeUnit.MILLISECONDS));
+ firstResponseLatenciesHistogram.record(
+ convertToMs(firstResponsePerOpTimer.elapsed(TimeUnit.NANOSECONDS)), attributes);
}
-
- recorder.recordOperation(Util.extractStatus(status), tableId, zone, cluster);
}
private void recordAttemptCompletion(@Nullable Throwable status) {
@@ -273,8 +328,7 @@ private void recordAttemptCompletion(@Nullable Throwable status) {
}
}
- // Make sure to reset the blocking time after recording it for the next attempt
- recorder.putClientBlockingLatencies(totalClientBlockingTime.getAndSet(0));
+ boolean isStreaming = operationType == OperationType.ServerStreaming;
// Patch the status until it's fixed in gax. When an attempt failed,
// it'll throw a ServerStreamingAttemptException. Unwrap the exception
@@ -283,7 +337,35 @@ private void recordAttemptCompletion(@Nullable Throwable status) {
status = status.getCause();
}
- recorder.putAttemptLatencies(attemptTimer.elapsed(TimeUnit.MILLISECONDS));
- recorder.recordAttempt(Util.extractStatus(status), tableId, zone, cluster);
+ String statusStr = Util.extractStatus(status);
+
+ Attributes attributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, tableId)
+ .put(CLUSTER_ID_KEY, cluster)
+ .put(ZONE_ID_KEY, zone)
+ .put(METHOD_KEY, spanName.toString())
+ .put(CLIENT_NAME_KEY, NAME)
+ .put(STREAMING_KEY, isStreaming)
+ .put(STATUS_KEY, statusStr)
+ .build();
+
+ clientBlockingLatenciesHistogram.record(convertToMs(totalClientBlockingTime.get()), attributes);
+
+ attemptLatenciesHistogram.record(
+ convertToMs(attemptTimer.elapsed(TimeUnit.NANOSECONDS)), attributes);
+
+ if (serverLatencies != null) {
+ serverLatenciesHistogram.record(serverLatencies, attributes);
+ connectivityErrorCounter.add(0, attributes);
+ } else {
+ connectivityErrorCounter.add(1, attributes);
+ }
+ }
+
+ private static double convertToMs(long nanoSeconds) {
+ double toMs = 1e-6;
+ return nanoSeconds * toMs;
}
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java
index 794997071d..f0ac656978 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java
@@ -15,29 +15,112 @@
*/
package com.google.cloud.bigtable.data.v2.stub.metrics;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.FIRST_RESPONSE_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME;
+
import com.google.api.core.InternalApi;
import com.google.api.gax.tracing.ApiTracer;
import com.google.api.gax.tracing.ApiTracerFactory;
import com.google.api.gax.tracing.BaseApiTracerFactory;
import com.google.api.gax.tracing.SpanName;
-import com.google.cloud.bigtable.stats.StatsWrapper;
-import com.google.common.collect.ImmutableMap;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.metrics.DoubleHistogram;
+import io.opentelemetry.api.metrics.LongCounter;
+import io.opentelemetry.api.metrics.Meter;
+import java.io.IOException;
/**
- * {@link ApiTracerFactory} that will generate OpenCensus metrics by using the {@link ApiTracer}
+ * {@link ApiTracerFactory} that will generate OpenTelemetry metrics by using the {@link ApiTracer}
* api.
*/
@InternalApi("For internal use only")
public class BuiltinMetricsTracerFactory extends BaseApiTracerFactory {
- private final ImmutableMap statsAttributes;
+ private final Attributes attributes;
+
+ private static final String MILLISECOND = "ms";
+ private static final String COUNT = "1";
- public static BuiltinMetricsTracerFactory create(ImmutableMap statsAttributes) {
- return new BuiltinMetricsTracerFactory(statsAttributes);
+ private final DoubleHistogram operationLatenciesHistogram;
+ private final DoubleHistogram attemptLatenciesHistogram;
+ private final DoubleHistogram serverLatenciesHistogram;
+ private final DoubleHistogram firstResponseLatenciesHistogram;
+ private final DoubleHistogram clientBlockingLatenciesHistogram;
+ private final DoubleHistogram applicationBlockingLatenciesHistogram;
+ private final LongCounter connectivityErrorCounter;
+ private final LongCounter retryCounter;
+
+ public static BuiltinMetricsTracerFactory create(
+ OpenTelemetry openTelemetry, Attributes attributes) throws IOException {
+ return new BuiltinMetricsTracerFactory(openTelemetry, attributes);
}
- private BuiltinMetricsTracerFactory(ImmutableMap statsAttributes) {
- this.statsAttributes = statsAttributes;
+ BuiltinMetricsTracerFactory(OpenTelemetry openTelemetry, Attributes attributes) {
+ this.attributes = attributes;
+ Meter meter = openTelemetry.getMeter(METER_NAME);
+
+ operationLatenciesHistogram =
+ meter
+ .histogramBuilder(OPERATION_LATENCIES_NAME)
+ .setDescription(
+ "Total time until final operation success or failure, including retries and backoff.")
+ .setUnit(MILLISECOND)
+ .build();
+ attemptLatenciesHistogram =
+ meter
+ .histogramBuilder(ATTEMPT_LATENCIES_NAME)
+ .setDescription("Client observed latency per RPC attempt.")
+ .setUnit(MILLISECOND)
+ .build();
+ serverLatenciesHistogram =
+ meter
+ .histogramBuilder(SERVER_LATENCIES_NAME)
+ .setDescription(
+ "The latency measured from the moment that the RPC entered the Google data center until the RPC was completed.")
+ .setUnit(MILLISECOND)
+ .build();
+ firstResponseLatenciesHistogram =
+ meter
+ .histogramBuilder(FIRST_RESPONSE_LATENCIES_NAME)
+ .setDescription(
+ "Latency from operation start until the response headers were received. The publishing of the measurement will be delayed until the attempt response has been received.")
+ .setUnit(MILLISECOND)
+ .build();
+ clientBlockingLatenciesHistogram =
+ meter
+ .histogramBuilder(CLIENT_BLOCKING_LATENCIES_NAME)
+ .setDescription(
+ "The artificial latency introduced by the client to limit the number of outstanding requests. The publishing of the measurement will be delayed until the attempt trailers have been received.")
+ .setUnit(MILLISECOND)
+ .build();
+ applicationBlockingLatenciesHistogram =
+ meter
+ .histogramBuilder(APPLICATION_BLOCKING_LATENCIES_NAME)
+ .setDescription(
+ "The latency of the client application consuming available response data.")
+ .setUnit(MILLISECOND)
+ .build();
+ connectivityErrorCounter =
+ meter
+ .counterBuilder(CONNECTIVITY_ERROR_COUNT_NAME)
+ .setDescription(
+ "Number of requests that failed to reach the Google datacenter. (Requests without google response headers).")
+ .setUnit(COUNT)
+ .build();
+ retryCounter =
+ meter
+ .counterBuilder(RETRY_COUNT_NAME)
+ .setDescription("The number of additional RPCs sent after the initial attempt.")
+ .setUnit(COUNT)
+ .build();
}
@Override
@@ -45,6 +128,14 @@ public ApiTracer newTracer(ApiTracer parent, SpanName spanName, OperationType op
return new BuiltinMetricsTracer(
operationType,
spanName,
- StatsWrapper.createRecorder(operationType, spanName, statsAttributes));
+ attributes,
+ operationLatenciesHistogram,
+ attemptLatenciesHistogram,
+ serverLatenciesHistogram,
+ firstResponseLatenciesHistogram,
+ clientBlockingLatenciesHistogram,
+ applicationBlockingLatenciesHistogram,
+ connectivityErrorCounter,
+ retryCounter);
}
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java
new file mode 100644
index 0000000000..445160a146
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.auth.Credentials;
+import com.google.auth.oauth2.GoogleCredentials;
+import io.opentelemetry.sdk.metrics.InstrumentSelector;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.View;
+import io.opentelemetry.sdk.metrics.export.MetricExporter;
+import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader;
+import java.io.IOException;
+import java.util.Map;
+import javax.annotation.Nullable;
+
+/**
+ * A utility class to register built-in metrics on a custom OpenTelemetry instance. This is for
+ * advanced usage, and is only necessary when wanting to write built-in metrics to cloud monitoring
+ * and custom sinks. Please refer to {@link CustomOpenTelemetryMetricsProvider} for example usage.
+ */
+public class BuiltinMetricsView {
+
+ private BuiltinMetricsView() {}
+
+ /**
+ * Register built-in metrics on the {@link SdkMeterProviderBuilder} with application default
+ * credentials.
+ */
+ public static void registerBuiltinMetrics(String projectId, SdkMeterProviderBuilder builder)
+ throws IOException {
+ BuiltinMetricsView.registerBuiltinMetrics(
+ projectId, GoogleCredentials.getApplicationDefault(), builder);
+ }
+
+ /** Register built-in metrics on the {@link SdkMeterProviderBuilder} with credentials. */
+ public static void registerBuiltinMetrics(
+ String projectId, @Nullable Credentials credentials, SdkMeterProviderBuilder builder)
+ throws IOException {
+ MetricExporter metricExporter = BigtableCloudMonitoringExporter.create(projectId, credentials);
+ for (Map.Entry entry :
+ BuiltinMetricsConstants.getAllViews().entrySet()) {
+ builder.registerView(entry.getKey(), entry.getValue());
+ }
+ builder.registerMetricReader(PeriodicMetricReader.create(metricExporter));
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java
new file mode 100644
index 0000000000..ba3034559d
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.common.base.MoreObjects;
+import io.opentelemetry.api.OpenTelemetry;
+
+/**
+ * Set a custom OpenTelemetry instance.
+ *
+ * To register client side metrics on the custom OpenTelemetry:
+ *
+ *
{@code
+ * SdkMeterProviderBuilder sdkMeterProvider = SdkMeterProvider.builder();
+ *
+ * // register Builtin metrics on your meter provider with default credentials
+ * BuiltinMetricsView.registerBuiltinMetrics("project-id", sdkMeterProvider);
+ *
+ * // register other metrics reader and views
+ * sdkMeterProvider.registerMetricReader(..);
+ * sdkMeterProvider.registerView(..);
+ *
+ * // create the OTEL instance
+ * OpenTelemetry openTelemetry = OpenTelemetrySdk
+ * .builder()
+ * .setMeterProvider(sdkMeterProvider.build())
+ * .build();
+ *
+ * // Override MetricsProvider in BigtableDataSettings
+ * BigtableDataSettings settings = BigtableDataSettings.newBuilder()
+ * .setProjectId("my-project")
+ * .setInstanceId("my-instance-id")
+ * .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry))
+ * .build();
+ * }
+ */
+public final class CustomOpenTelemetryMetricsProvider implements MetricsProvider {
+
+ private final OpenTelemetry otel;
+
+ public static CustomOpenTelemetryMetricsProvider create(OpenTelemetry otel) {
+ return new CustomOpenTelemetryMetricsProvider(otel);
+ }
+
+ private CustomOpenTelemetryMetricsProvider(OpenTelemetry otel) {
+ this.otel = otel;
+ }
+
+ public OpenTelemetry getOpenTelemetry() {
+ return otel;
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).add("openTelemetry", otel).toString();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java
new file mode 100644
index 0000000000..b8aad8c931
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.api.core.InternalApi;
+import com.google.auth.Credentials;
+import com.google.common.base.MoreObjects;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import java.io.IOException;
+import javax.annotation.Nullable;
+
+/**
+ * Set {@link
+ * com.google.cloud.bigtable.data.v2.BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)},
+ * to {@link this#INSTANCE} to enable collecting and exporting client side metrics
+ * https://cloud.google.com/bigtable/docs/client-side-metrics. This is the default setting in {@link
+ * com.google.cloud.bigtable.data.v2.BigtableDataSettings}.
+ */
+public final class DefaultMetricsProvider implements MetricsProvider {
+
+ public static DefaultMetricsProvider INSTANCE = new DefaultMetricsProvider();
+
+ private OpenTelemetry openTelemetry;
+ private String projectId;
+
+ private DefaultMetricsProvider() {}
+
+ @InternalApi
+ public OpenTelemetry getOpenTelemetry(String projectId, @Nullable Credentials credentials)
+ throws IOException {
+ this.projectId = projectId;
+ if (openTelemetry == null) {
+ SdkMeterProviderBuilder meterProvider = SdkMeterProvider.builder();
+ BuiltinMetricsView.registerBuiltinMetrics(projectId, credentials, meterProvider);
+ openTelemetry = OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+ }
+ return openTelemetry;
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this)
+ .add("projectId", projectId)
+ .add("openTelemetry", openTelemetry)
+ .toString();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java
index cab3b0bbd0..a891df9509 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java
@@ -15,12 +15,15 @@
*/
package com.google.cloud.bigtable.data.v2.stub.metrics;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME;
+
import com.google.api.core.InternalApi;
-import com.google.cloud.bigtable.stats.StatsRecorderWrapperForConnection;
-import com.google.cloud.bigtable.stats.StatsWrapper;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableMap;
import io.grpc.ClientInterceptor;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.metrics.LongHistogram;
+import io.opentelemetry.api.metrics.Meter;
import java.util.Collections;
import java.util.Set;
import java.util.WeakHashMap;
@@ -30,24 +33,30 @@
/* Background task that goes through all connections and updates the errors_per_connection metric. */
@InternalApi("For internal use only")
public class ErrorCountPerConnectionMetricTracker implements Runnable {
+
private static final Integer PER_CONNECTION_ERROR_COUNT_PERIOD_SECONDS = 60;
+
+ private final LongHistogram perConnectionErrorCountHistogram;
+ private final Attributes attributes;
+
private final Set connectionErrorCountInterceptors;
private final Object interceptorsLock = new Object();
- // This is not final so that it can be updated and mocked during testing.
- private StatsRecorderWrapperForConnection statsRecorderWrapperForConnection;
- @VisibleForTesting
- void setStatsRecorderWrapperForConnection(
- StatsRecorderWrapperForConnection statsRecorderWrapperForConnection) {
- this.statsRecorderWrapperForConnection = statsRecorderWrapperForConnection;
- }
-
- public ErrorCountPerConnectionMetricTracker(ImmutableMap builtinAttributes) {
+ public ErrorCountPerConnectionMetricTracker(OpenTelemetry openTelemetry, Attributes attributes) {
connectionErrorCountInterceptors =
Collections.synchronizedSet(Collections.newSetFromMap(new WeakHashMap<>()));
- this.statsRecorderWrapperForConnection =
- StatsWrapper.createRecorderForConnection(builtinAttributes);
+ Meter meter = openTelemetry.getMeter(METER_NAME);
+
+ perConnectionErrorCountHistogram =
+ meter
+ .histogramBuilder(PER_CONNECTION_ERROR_COUNT_NAME)
+ .ofLongs()
+ .setDescription("Distribution of counts of channels per 'error count per minute'.")
+ .setUnit("1")
+ .build();
+
+ this.attributes = attributes;
}
public void startConnectionErrorCountTracker(ScheduledExecutorService scheduler) {
@@ -75,7 +84,7 @@ public void run() {
if (errors > 0 || successes > 0) {
// TODO: add a metric to also keep track of the number of successful requests per each
// connection.
- statsRecorderWrapperForConnection.putAndRecordPerConnectionErrorCount(errors);
+ perConnectionErrorCountHistogram.record(errors, attributes);
}
}
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java
new file mode 100644
index 0000000000..251bb41619
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.api.core.InternalExtensionOnly;
+
+/**
+ * Provide client side metrics https://cloud.google.com/bigtable/docs/client-side-metrics
+ * implementations.
+ */
+@InternalExtensionOnly
+public interface MetricsProvider {}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java
new file mode 100644
index 0000000000..9a00ddb135
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Set {@link
+ * com.google.cloud.bigtable.data.v2.BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)},
+ * to {@link this#INSTANCE} to disable collecting and exporting client side metrics
+ * https://cloud.google.com/bigtable/docs/client-side-metrics.
+ */
+public final class NoopMetricsProvider implements MetricsProvider {
+
+ public static NoopMetricsProvider INSTANCE = new NoopMetricsProvider();
+
+ private NoopMetricsProvider() {}
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).toString();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java
index b7140f0156..ce73d75dc1 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java
@@ -21,6 +21,7 @@
import com.google.api.gax.rpc.ApiCallContext;
import com.google.api.gax.rpc.UnaryCallable;
import com.google.api.gax.tracing.ApiTracer;
+import org.threeten.bp.Duration;
/**
* This callable will extract total throttled time from {@link ApiCallContext} and add it to {@link
@@ -42,7 +43,8 @@ public ApiFuture futureCall(RequestT request, ApiCallContext context)
// this should always be true
if (tracer instanceof BigtableTracer) {
((BigtableTracer) tracer)
- .batchRequestThrottled(context.getOption(Batcher.THROTTLED_TIME_KEY));
+ .batchRequestThrottled(
+ Duration.ofMillis(context.getOption(Batcher.THROTTLED_TIME_KEY)).toNanos());
}
}
return innerCallable.futureCall(request, context);
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java
index a35112b380..fea66e82bf 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java
@@ -36,6 +36,7 @@
import com.google.bigtable.v2.ReadRowsResponse;
import com.google.cloud.bigtable.data.v2.internal.NameUtil;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
+import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider;
import com.google.common.base.Preconditions;
import com.google.common.io.BaseEncoding;
import io.grpc.Attributes;
@@ -169,10 +170,13 @@ public void tearDown() {
@Test
public void testNewClientsShareTransportChannel() throws Exception {
-
// Create 3 lightweight clients
-
- try (BigtableDataClientFactory factory = BigtableDataClientFactory.create(defaultSettings);
+ try (BigtableDataClientFactory factory =
+ BigtableDataClientFactory.create(
+ defaultSettings
+ .toBuilder()
+ .setMetricsProvider(NoopMetricsProvider.INSTANCE)
+ .build());
BigtableDataClient ignored1 = factory.createForInstance("project1", "instance1");
BigtableDataClient ignored2 = factory.createForInstance("project2", "instance2");
BigtableDataClient ignored3 = factory.createForInstance("project3", "instance3")) {
@@ -316,7 +320,7 @@ public void testFeatureFlags() throws Exception {
@Test
public void testBulkMutationFlowControllerConfigured() throws Exception {
BigtableDataSettings settings =
- BigtableDataSettings.newBuilder()
+ BigtableDataSettings.newBuilderForEmulator(server.getPort())
.setProjectId("my-project")
.setInstanceId("my-instance")
.setCredentialsProvider(credentialsProvider)
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java
index 4e75fb8631..56181a20ab 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java
@@ -15,34 +15,64 @@
*/
package com.google.cloud.bigtable.data.v2.it;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getAggregatedValue;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getMetricData;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getStartTimeSeconds;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.verifyAttributes;
+import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.Truth.assertWithMessage;
import static com.google.common.truth.TruthJUnit.assume;
import com.google.api.client.util.Lists;
+import com.google.cloud.bigtable.admin.v2.BigtableInstanceAdminClient;
import com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient;
+import com.google.cloud.bigtable.admin.v2.models.AppProfile;
+import com.google.cloud.bigtable.admin.v2.models.CreateAppProfileRequest;
import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest;
import com.google.cloud.bigtable.admin.v2.models.Table;
+import com.google.cloud.bigtable.data.v2.BigtableDataClient;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.models.Query;
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView;
+import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv;
import com.google.cloud.bigtable.test_helpers.env.PrefixGenerator;
import com.google.cloud.bigtable.test_helpers.env.TestEnvRule;
import com.google.cloud.monitoring.v3.MetricServiceClient;
import com.google.common.base.Stopwatch;
+import com.google.common.collect.BoundType;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Range;
import com.google.monitoring.v3.ListTimeSeriesRequest;
import com.google.monitoring.v3.ListTimeSeriesResponse;
+import com.google.monitoring.v3.Point;
import com.google.monitoring.v3.ProjectName;
import com.google.monitoring.v3.TimeInterval;
+import com.google.monitoring.v3.TimeSeries;
+import com.google.protobuf.Timestamp;
import com.google.protobuf.util.Timestamps;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.common.AttributesBuilder;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import java.util.stream.Collectors;
+import org.junit.After;
+import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
@@ -50,6 +80,7 @@
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.threeten.bp.Duration;
+import org.threeten.bp.Instant;
@RunWith(JUnit4.class)
public class BuiltinMetricsIT {
@@ -58,71 +89,131 @@ public class BuiltinMetricsIT {
private static final Logger logger = Logger.getLogger(BuiltinMetricsIT.class.getName());
@Rule public Timeout globalTimeout = Timeout.seconds(900);
- private static Table table;
- private static BigtableTableAdminClient tableAdminClient;
- private static MetricServiceClient metricClient;
+
+ private Table tableCustomOtel;
+ private Table tableDefault;
+ private BigtableDataClient clientCustomOtel;
+ private BigtableDataClient clientDefault;
+ private BigtableTableAdminClient tableAdminClient;
+ private BigtableInstanceAdminClient instanceAdminClient;
+ private MetricServiceClient metricClient;
+
+ private InMemoryMetricReader metricReader;
+ private String appProfileCustomOtel;
+ private String appProfileDefault;
public static String[] VIEWS = {
"operation_latencies",
"attempt_latencies",
"connectivity_error_count",
- "application_blocking_latencies"
+ "application_blocking_latencies",
};
- @BeforeClass
- public static void setUpClass() throws IOException {
+ @Before
+ public void setup() throws IOException {
+ // This test covers two scenarios: an end-to-end test using the default OTEL instance created
+ // by the client, and an end-to-end test using a custom OTEL instance supplied by the customer.
+ // In both, a BigtableCloudMonitoringExporter is created to export data to Cloud Monitoring.
assume()
.withMessage("Builtin metrics integration test is not supported by emulator")
.that(testEnvRule.env())
.isNotInstanceOf(EmulatorEnv.class);
- // Enable built in metrics
- BigtableDataSettings.enableBuiltinMetrics();
-
// Create a cloud monitoring client
metricClient = MetricServiceClient.create();
tableAdminClient = testEnvRule.env().getTableAdminClient();
+ instanceAdminClient = testEnvRule.env().getInstanceAdminClient();
+ appProfileCustomOtel = PrefixGenerator.newPrefix("test1");
+ appProfileDefault = PrefixGenerator.newPrefix("test2");
+ instanceAdminClient.createAppProfile(
+ CreateAppProfileRequest.of(testEnvRule.env().getInstanceId(), appProfileCustomOtel)
+ .setRoutingPolicy(
+ AppProfile.SingleClusterRoutingPolicy.of(testEnvRule.env().getPrimaryClusterId()))
+ .setIsolationPolicy(AppProfile.StandardIsolationPolicy.of(AppProfile.Priority.LOW)));
+ instanceAdminClient.createAppProfile(
+ CreateAppProfileRequest.of(testEnvRule.env().getInstanceId(), appProfileDefault)
+ .setRoutingPolicy(
+ AppProfile.SingleClusterRoutingPolicy.of(testEnvRule.env().getPrimaryClusterId()))
+ .setIsolationPolicy(AppProfile.StandardIsolationPolicy.of(AppProfile.Priority.LOW)));
+
+ // When using the custom OTEL instance, we can also register an InMemoryMetricReader on the
+ // SdkMeterProvider to verify the data exported to Cloud Monitoring against the in-memory
+ // metric data collected in the InMemoryMetricReader.
+ metricReader = InMemoryMetricReader.create();
+
+ SdkMeterProviderBuilder meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(metricReader);
+ BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider);
+ OpenTelemetry openTelemetry =
+ OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+
+ BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder();
+
+ clientCustomOtel =
+ BigtableDataClient.create(
+ settings
+ .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry))
+ .setAppProfileId(appProfileCustomOtel)
+ .build());
+ clientDefault = BigtableDataClient.create(settings.setAppProfileId(appProfileDefault).build());
}
- @AfterClass
- public static void tearDown() {
+ @After
+ public void tearDown() {
if (metricClient != null) {
metricClient.close();
}
- if (table != null) {
- tableAdminClient.deleteTable(table.getId());
+ if (tableCustomOtel != null) {
+ tableAdminClient.deleteTable(tableCustomOtel.getId());
+ }
+ if (tableDefault != null) {
+ tableAdminClient.deleteTable(tableDefault.getId());
+ }
+ if (instanceAdminClient != null) {
+ instanceAdminClient.deleteAppProfile(
+ testEnvRule.env().getInstanceId(), appProfileCustomOtel, true);
+ instanceAdminClient.deleteAppProfile(
+ testEnvRule.env().getInstanceId(), appProfileDefault, true);
+ }
+ if (clientCustomOtel != null) {
+ clientCustomOtel.close();
+ }
+ if (clientDefault != null) {
+ clientDefault.close();
}
}
@Test
- public void testBuiltinMetrics() throws Exception {
- logger.info("Started testing builtin metrics");
- table =
+ public void testBuiltinMetricsWithDefaultOTEL() throws Exception {
+ logger.info("Started testing builtin metrics with default OTEL");
+ tableDefault =
tableAdminClient.createTable(
- CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test"))
+ CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test1"))
.addFamily("cf"));
- logger.info("Create table: " + table.getId());
- // Send a MutateRow and ReadRows request
- testEnvRule
- .env()
- .getDataClient()
- .mutateRow(RowMutation.create(table.getId(), "a-new-key").setCell("cf", "q", "abc"));
+ logger.info("Create default table: " + tableDefault.getId());
+
+ Instant start = Instant.now().minus(Duration.ofSeconds(10));
+
+ // Send a MutateRow and ReadRows request and measure the latencies for these requests.
+ clientDefault.mutateRow(
+ RowMutation.create(tableDefault.getId(), "a-new-key").setCell("cf", "q", "abc"));
ArrayList rows =
- Lists.newArrayList(
- testEnvRule.env().getDataClient().readRows(Query.create(table.getId()).limit(10)));
+ Lists.newArrayList(clientDefault.readRows(Query.create(tableDefault.getId()).limit(10)));
- Stopwatch stopwatch = Stopwatch.createStarted();
+ // This stopwatch is used to limit fetching of metric data in verifyMetrics
+ Stopwatch metricsPollingStopwatch = Stopwatch.createStarted();
ProjectName name = ProjectName.of(testEnvRule.env().getProjectId());
- // Restrict time to last 10 minutes and 5 minutes after the request
- long startMillis = System.currentTimeMillis() - Duration.ofMinutes(10).toMillis();
- long endMillis = startMillis + Duration.ofMinutes(15).toMillis();
+ // Interval is set in the monarch request when querying metric timestamps.
+ // Restrict it to just before we send the request and 3 minutes after we send the request. If
+ // it turns out to still be flaky we can increase the filter range.
+ Instant end = Instant.now().plus(Duration.ofMinutes(3));
TimeInterval interval =
TimeInterval.newBuilder()
- .setStartTime(Timestamps.fromMillis(startMillis))
- .setEndTime(Timestamps.fromMillis(endMillis))
+ .setStartTime(Timestamps.fromMillis(start.toEpochMilli()))
+ .setEndTime(Timestamps.fromMillis(end.toEpochMilli()))
.build();
for (String view : VIEWS) {
@@ -132,42 +223,123 @@ public void testBuiltinMetrics() throws Exception {
String.format(
"metric.type=\"bigtable.googleapis.com/client/%s\" "
+ "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.MutateRow\""
- + " AND resource.labels.table=\"%s\"",
- view, testEnvRule.env().getInstanceId(), table.getId());
+ + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"",
+ view, testEnvRule.env().getInstanceId(), tableDefault.getId(), appProfileDefault);
ListTimeSeriesRequest.Builder requestBuilder =
ListTimeSeriesRequest.newBuilder()
.setName(name.toString())
.setFilter(metricFilter)
.setInterval(interval)
.setView(ListTimeSeriesRequest.TimeSeriesView.FULL);
-
- verifyMetricsArePublished(requestBuilder.build(), stopwatch, view);
+ verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view);
// Verify that metrics are published for ReadRows request
metricFilter =
String.format(
"metric.type=\"bigtable.googleapis.com/client/%s\" "
+ "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.ReadRows\""
- + " AND resource.labels.table=\"%s\"",
- view, testEnvRule.env().getInstanceId(), table.getId());
+ + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"",
+ view, testEnvRule.env().getInstanceId(), tableDefault.getId(), appProfileDefault);
+ requestBuilder.setFilter(metricFilter);
+
+ verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view);
+ }
+ }
+
+ @Test
+ public void testBuiltinMetricsWithCustomOTEL() throws Exception {
+ logger.info("Started testing builtin metrics with custom OTEL");
+ tableCustomOtel =
+ tableAdminClient.createTable(
+ CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test2"))
+ .addFamily("cf"));
+ logger.info("Create custom table: " + tableCustomOtel.getId());
+
+ Instant start = Instant.now().minus(Duration.ofSeconds(10));
+ // Send a MutateRow and ReadRows request and measure the latencies for these requests.
+ clientCustomOtel.mutateRow(
+ RowMutation.create(tableCustomOtel.getId(), "a-new-key").setCell("cf", "q", "abc"));
+ ArrayList rows =
+ Lists.newArrayList(
+ clientCustomOtel.readRows(Query.create(tableCustomOtel.getId()).limit(10)));
+
+ // This stopwatch is used to limit fetching of metric data in verifyMetrics
+ Stopwatch metricsPollingStopwatch = Stopwatch.createStarted();
+
+ ProjectName name = ProjectName.of(testEnvRule.env().getProjectId());
+
+ Collection fromMetricReader = metricReader.collectAllMetrics();
+
+ // Interval is set in the monarch request when querying metric timestamps.
+ // Restrict it to just before we send the request and 3 minutes after we send the request. If
+ // it turns out to still be flaky we can increase the filter range.
+ Instant end = start.plus(Duration.ofMinutes(3));
+ TimeInterval interval =
+ TimeInterval.newBuilder()
+ .setStartTime(Timestamps.fromMillis(start.toEpochMilli()))
+ .setEndTime(Timestamps.fromMillis(end.toEpochMilli()))
+ .build();
+
+ for (String view : VIEWS) {
+ String otelMetricName = view;
+ if (view.equals("application_blocking_latencies")) {
+ otelMetricName = "application_latencies";
+ }
+ MetricData dataFromReader = getMetricData(fromMetricReader, otelMetricName);
+
+ // Filter on instance and method name
+ // Verify that metrics are correct for the MutateRow request
+ String metricFilter =
+ String.format(
+ "metric.type=\"bigtable.googleapis.com/client/%s\" "
+ + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.MutateRow\""
+ + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"",
+ view,
+ testEnvRule.env().getInstanceId(),
+ tableCustomOtel.getId(),
+ appProfileCustomOtel);
+ ListTimeSeriesRequest.Builder requestBuilder =
+ ListTimeSeriesRequest.newBuilder()
+ .setName(name.toString())
+ .setFilter(metricFilter)
+ .setInterval(interval)
+ .setView(ListTimeSeriesRequest.TimeSeriesView.FULL);
+
+ ListTimeSeriesResponse response =
+ verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view);
+ verifyMetricsWithMetricsReader(response, dataFromReader);
+
+ // Verify that metrics are correct for ReadRows request
+ metricFilter =
+ String.format(
+ "metric.type=\"bigtable.googleapis.com/client/%s\" "
+ + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.ReadRows\""
+ + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"",
+ view,
+ testEnvRule.env().getInstanceId(),
+ tableCustomOtel.getId(),
+ appProfileCustomOtel);
requestBuilder.setFilter(metricFilter);
- verifyMetricsArePublished(requestBuilder.build(), stopwatch, view);
+ response = verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view);
+ verifyMetricsWithMetricsReader(response, dataFromReader);
}
}
- private void verifyMetricsArePublished(
- ListTimeSeriesRequest request, Stopwatch stopwatch, String view) throws Exception {
+ private ListTimeSeriesResponse verifyMetricsArePublished(
+ ListTimeSeriesRequest request, Stopwatch metricsPollingStopwatch, String view)
+ throws Exception {
ListTimeSeriesResponse response = metricClient.listTimeSeriesCallable().call(request);
- logger.log(
- Level.INFO,
- "Checking for view "
- + view
- + ", has timeseries="
- + response.getTimeSeriesCount()
- + " stopwatch elapsed "
- + stopwatch.elapsed(TimeUnit.MINUTES));
- while (response.getTimeSeriesCount() == 0 && stopwatch.elapsed(TimeUnit.MINUTES) < 10) {
+ while (response.getTimeSeriesCount() == 0
+ && metricsPollingStopwatch.elapsed(TimeUnit.MINUTES) < 10) {
+ logger.log(
+ Level.INFO,
+ "Checking for view "
+ + view
+ + ", has timeseries="
+ + response.getTimeSeriesCount()
+ + " stopwatch elapsed "
+ + metricsPollingStopwatch.elapsed(TimeUnit.MINUTES));
// Call listTimeSeries every minute
Thread.sleep(Duration.ofMinutes(1).toMillis());
response = metricClient.listTimeSeriesCallable().call(request);
@@ -176,5 +348,64 @@ private void verifyMetricsArePublished(
assertWithMessage("View " + view + " didn't return any data.")
.that(response.getTimeSeriesCount())
.isGreaterThan(0);
+
+ return response;
+ }
+
+ private void verifyMetricsWithMetricsReader(
+ ListTimeSeriesResponse response, MetricData dataFromReader) {
+ for (TimeSeries ts : response.getTimeSeriesList()) {
+ Map attributesMap =
+ ImmutableMap.builder()
+ .putAll(ts.getResource().getLabelsMap())
+ .putAll(ts.getMetric().getLabelsMap())
+ .build();
+ AttributesBuilder attributesBuilder = Attributes.builder();
+ String streamingKey = BuiltinMetricsConstants.STREAMING_KEY.getKey();
+ attributesMap.forEach(
+ (k, v) -> {
+ if (!k.equals(streamingKey)) {
+ attributesBuilder.put(k, v);
+ }
+ });
+ if (attributesMap.containsKey(streamingKey)) {
+ attributesBuilder.put(streamingKey, Boolean.parseBoolean(attributesMap.get(streamingKey)));
+ }
+ Attributes attributes = attributesBuilder.build();
+ verifyAttributes(dataFromReader, attributes);
+ long expectedValue = getAggregatedValue(dataFromReader, attributes);
+ Timestamp startTime = getStartTimeSeconds(dataFromReader, attributes);
+ assertThat(startTime.getSeconds()).isGreaterThan(0);
+ List point =
+ ts.getPointsList().stream()
+ .filter(
+ p ->
+ Timestamps.compare(p.getInterval().getStartTime(), startTime) >= 0
+ && Timestamps.compare(
+ p.getInterval().getStartTime(),
+ Timestamps.add(
+ startTime,
+ com.google.protobuf.Duration.newBuilder()
+ .setSeconds(60)
+ .build()))
+ < 0)
+ .collect(Collectors.toList());
+ if (point.size() > 0) {
+ long actualValue = (long) point.get(0).getValue().getDistributionValue().getMean();
+ assertWithMessage(
+ "actual value does not match expected value, actual value "
+ + actualValue
+ + " expected value "
+ + expectedValue
+ + " actual start time "
+ + point.get(0).getInterval().getStartTime()
+ + " expected start time "
+ + startTime)
+ .that(actualValue)
+ .isIn(
+ Range.range(
+ expectedValue - 1, BoundType.CLOSED, expectedValue + 1, BoundType.CLOSED));
+ }
+ }
}
}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java
new file mode 100644
index 0000000000..56f6bfa476
--- /dev/null
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.it;
+
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
+import com.google.common.truth.Correspondence;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.data.PointData;
+
+public class MetricsITUtils {
+
+ static final Correspondence METRIC_DATA_NAME_CONTAINS =
+ Correspondence.from((md, s) -> md.getName().contains(s), "contains name");
+
+ static final Correspondence POINT_DATA_CLUSTER_ID_CONTAINS =
+ Correspondence.from(
+ (pd, s) -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY).contains(s),
+ "contains attributes");
+
+ static final Correspondence POINT_DATA_ZONE_ID_CONTAINS =
+ Correspondence.from(
+ (pd, s) -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY).contains(s),
+ "contains attributes");
+}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java
index b0e12d5ade..84ab24f1c8 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java
@@ -15,37 +15,76 @@
*/
package com.google.cloud.bigtable.data.v2.it;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.METRIC_DATA_NAME_CONTAINS;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_CLUSTER_ID_CONTAINS;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_ZONE_ID_CONTAINS;
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.TruthJUnit.assume;
import com.google.api.core.ApiFuture;
import com.google.api.gax.rpc.NotFoundException;
import com.google.cloud.bigtable.admin.v2.models.Cluster;
+import com.google.cloud.bigtable.data.v2.BigtableDataClient;
+import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.models.Query;
import com.google.cloud.bigtable.data.v2.models.Row;
-import com.google.cloud.bigtable.stats.BuiltinViews;
-import com.google.cloud.bigtable.stats.StatsWrapper;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView;
+import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv;
import com.google.cloud.bigtable.test_helpers.env.TestEnvRule;
import com.google.common.collect.Lists;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.data.PointData;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
+import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
-import org.junit.BeforeClass;
+import java.util.stream.Collectors;
+import org.junit.After;
+import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
public class StreamingMetricsMetadataIT {
@ClassRule public static TestEnvRule testEnvRule = new TestEnvRule();
- @BeforeClass
- public static void setUpClass() {
+ private BigtableDataClient client;
+ private InMemoryMetricReader metricReader;
+
+ @Before
+ public void setup() throws IOException {
assume()
.withMessage("StreamingMetricsMetadataIT is not supported on Emulator")
.that(testEnvRule.env())
.isNotInstanceOf(EmulatorEnv.class);
- BuiltinViews.registerBigtableBuiltinViews();
+
+ BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder();
+
+ metricReader = InMemoryMetricReader.create();
+
+ SdkMeterProviderBuilder meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(metricReader);
+ BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider);
+ OpenTelemetry openTelemetry =
+ OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+
+ settings.setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry));
+ client = BigtableDataClient.create(settings.build());
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ if (client != null) {
+ client.close();
+ }
}
@Test
@@ -54,7 +93,7 @@ public void testSuccess() throws Exception {
String uniqueKey = prefix + "-read";
Query query = Query.create(testEnvRule.env().getTableId()).rowKey(uniqueKey);
- ArrayList rows = Lists.newArrayList(testEnvRule.env().getDataClient().readRows(query));
+ ArrayList rows = Lists.newArrayList(client.readRows(query));
ApiFuture> clustersFuture =
testEnvRule
@@ -64,27 +103,73 @@ public void testSuccess() throws Exception {
List clusters = clustersFuture.get(1, TimeUnit.MINUTES);
- // give opencensus some time to populate view data
- Thread.sleep(100);
+ Collection allMetricData = metricReader.collectAllMetrics();
+ List metrics =
+ metricReader.collectAllMetrics().stream()
+ .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME))
+ .collect(Collectors.toList());
+
+ assertThat(allMetricData)
+ .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
+ .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME);
+ assertThat(metrics).hasSize(1);
- List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings();
- assertThat(tagValueStrings).contains(clusters.get(0).getZone());
- assertThat(tagValueStrings).contains(clusters.get(0).getId());
+ MetricData metricData = metrics.get(0);
+ List pointData = new ArrayList<>(metricData.getData().getPoints());
+ List clusterAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
+ .collect(Collectors.toList());
+ List zoneAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
+ .collect(Collectors.toList());
+
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS)
+ .contains(clusters.get(0).getId());
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS)
+ .contains(clusters.get(0).getZone());
+ assertThat(clusterAttributes).contains(clusters.get(0).getId());
+ assertThat(zoneAttributes).contains(clusters.get(0).getZone());
}
@Test
- public void testFailure() throws InterruptedException {
+ public void testFailure() {
Query query = Query.create("non-exist-table");
try {
- Lists.newArrayList(testEnvRule.env().getDataClient().readRows(query));
+ Lists.newArrayList(client.readRows(query));
} catch (NotFoundException e) {
}
- // give opencensus some time to populate view data
- Thread.sleep(100);
+ Collection allMetricData = metricReader.collectAllMetrics();
+ List metrics =
+ metricReader.collectAllMetrics().stream()
+ .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME))
+ .collect(Collectors.toList());
+
+ assertThat(allMetricData)
+ .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
+ .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME);
+ assertThat(metrics).hasSize(1);
+
+ MetricData metricData = metrics.get(0);
+ List pointData = new ArrayList<>(metricData.getData().getPoints());
+ List clusterAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
+ .collect(Collectors.toList());
+ List zoneAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
+ .collect(Collectors.toList());
- List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings();
- assertThat(tagValueStrings).contains("unspecified");
- assertThat(tagValueStrings).contains("global");
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS)
+ .contains("unspecified");
+ assertThat(pointData).comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS).contains("global");
+ assertThat(clusterAttributes).contains("unspecified");
+ assertThat(zoneAttributes).contains("global");
}
}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java
index aa2a4317fc..ad5f71db8f 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java
@@ -15,35 +15,76 @@
*/
package com.google.cloud.bigtable.data.v2.it;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.METRIC_DATA_NAME_CONTAINS;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_CLUSTER_ID_CONTAINS;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_ZONE_ID_CONTAINS;
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.TruthJUnit.assume;
import com.google.api.core.ApiFuture;
import com.google.api.gax.rpc.NotFoundException;
import com.google.cloud.bigtable.admin.v2.models.Cluster;
+import com.google.cloud.bigtable.data.v2.BigtableDataClient;
+import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
-import com.google.cloud.bigtable.stats.BuiltinViews;
-import com.google.cloud.bigtable.stats.StatsWrapper;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView;
+import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv;
import com.google.cloud.bigtable.test_helpers.env.TestEnvRule;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.data.PointData;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
-import org.junit.BeforeClass;
+import java.util.stream.Collectors;
+import org.junit.After;
+import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
public class UnaryMetricsMetadataIT {
@ClassRule public static TestEnvRule testEnvRule = new TestEnvRule();
- @BeforeClass
- public static void setUpClass() {
+ private BigtableDataClient client;
+ private InMemoryMetricReader metricReader;
+
+ @Before
+ public void setup() throws IOException {
assume()
.withMessage("UnaryMetricsMetadataIT is not supported on Emulator")
.that(testEnvRule.env())
.isNotInstanceOf(EmulatorEnv.class);
- BuiltinViews.registerBigtableBuiltinViews();
+
+ BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder();
+
+ metricReader = InMemoryMetricReader.create();
+
+ SdkMeterProviderBuilder meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(metricReader);
+ BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider);
+ OpenTelemetry openTelemetry =
+ OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+
+ settings.setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry));
+
+ client = BigtableDataClient.create(settings.build());
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ if (client != null) {
+ client.close();
+ }
}
@Test
@@ -52,9 +93,7 @@ public void testSuccess() throws Exception {
String familyId = testEnvRule.env().getFamilyId();
ApiFuture future =
- testEnvRule
- .env()
- .getDataClient()
+ client
.mutateRowCallable()
.futureCall(
RowMutation.create(testEnvRule.env().getTableId(), rowKey)
@@ -69,18 +108,36 @@ public void testSuccess() throws Exception {
.listClustersAsync(testEnvRule.env().getInstanceId());
List clusters = clustersFuture.get(1, TimeUnit.MINUTES);
- // give opencensus some time to populate view data
- for (int i = 0; i < 10; i++) {
- if (StatsWrapper.getOperationLatencyViewTagValueStrings()
- .contains(clusters.get(0).getZone())) {
- break;
- }
- Thread.sleep(100);
- }
+ Collection allMetricData = metricReader.collectAllMetrics();
+ List metrics =
+ allMetricData.stream()
+ .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME))
+ .collect(Collectors.toList());
+
+ assertThat(allMetricData)
+ .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
+ .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME);
+ assertThat(metrics).hasSize(1);
+
+ MetricData metricData = metrics.get(0);
+ List pointData = new ArrayList<>(metricData.getData().getPoints());
+ List clusterAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
+ .collect(Collectors.toList());
+ List zoneAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
+ .collect(Collectors.toList());
- List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings();
- assertThat(tagValueStrings).contains(clusters.get(0).getZone());
- assertThat(tagValueStrings).contains(clusters.get(0).getId());
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS)
+ .contains(clusters.get(0).getId());
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS)
+ .contains(clusters.get(0).getZone());
+ assertThat(clusterAttributes).contains(clusters.get(0).getId());
+ assertThat(zoneAttributes).contains(clusters.get(0).getZone());
}
@Test
@@ -89,9 +146,7 @@ public void testFailure() throws Exception {
String familyId = testEnvRule.env().getFamilyId();
ApiFuture future =
- testEnvRule
- .env()
- .getDataClient()
+ client
.mutateRowCallable()
.futureCall(
RowMutation.create("non-exist-table", rowKey).setCell(familyId, "q", "myVal"));
@@ -106,16 +161,39 @@ public void testFailure() throws Exception {
}
}
- // give opencensus some time to populate view data
- for (int i = 0; i < 10; i++) {
- if (StatsWrapper.getOperationLatencyViewTagValueStrings().contains("unspecified")) {
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = null;
+ for (MetricData md : allMetricData) {
+ if (md.getName()
+ .equals(
+ BuiltinMetricsConstants.METER_NAME
+ + BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME)) {
+ metricData = md;
break;
}
- Thread.sleep(100);
}
- List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings();
- assertThat(tagValueStrings).contains("unspecified");
- assertThat(tagValueStrings).contains("global");
+ assertThat(allMetricData)
+ .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
+ .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME);
+ assertThat(metricData).isNotNull();
+
+ List pointData = new ArrayList<>(metricData.getData().getPoints());
+
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS)
+ .contains("unspecified");
+ assertThat(pointData).comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS).contains("global");
+ List clusterAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
+ .collect(Collectors.toList());
+ List zoneAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
+ .collect(Collectors.toList());
+
+ assertThat(clusterAttributes).contains("unspecified");
+ assertThat(zoneAttributes).contains("global");
}
}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java
index 79cbccb0ac..290fcc321f 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java
@@ -885,6 +885,7 @@ public void enableRetryInfoFalseValueTest() throws IOException {
"generateInitialChangeStreamPartitionsSettings",
"readChangeStreamSettings",
"pingAndWarmSettings",
+ "metricsProvider",
};
@Test
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java
new file mode 100644
index 0000000000..a0b9c058dc
--- /dev/null
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APP_PROFILE_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_UID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY;
+import static com.google.common.truth.Truth.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.google.api.Distribution;
+import com.google.api.MonitoredResource;
+import com.google.api.core.ApiFuture;
+import com.google.api.core.ApiFutures;
+import com.google.api.gax.rpc.UnaryCallable;
+import com.google.cloud.monitoring.v3.MetricServiceClient;
+import com.google.cloud.monitoring.v3.stub.MetricServiceStub;
+import com.google.common.collect.ImmutableList;
+import com.google.monitoring.v3.CreateTimeSeriesRequest;
+import com.google.monitoring.v3.TimeSeries;
+import com.google.protobuf.Empty;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.common.InstrumentationScopeInfo;
+import io.opentelemetry.sdk.metrics.data.AggregationTemporality;
+import io.opentelemetry.sdk.metrics.data.HistogramPointData;
+import io.opentelemetry.sdk.metrics.data.LongPointData;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramData;
+import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramPointData;
+import io.opentelemetry.sdk.metrics.internal.data.ImmutableLongPointData;
+import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData;
+import io.opentelemetry.sdk.metrics.internal.data.ImmutableSumData;
+import io.opentelemetry.sdk.resources.Resource;
+import java.util.Arrays;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnit;
+import org.mockito.junit.MockitoRule;
+
+public class BigtableCloudMonitoringExporterTest {
+ private static final String projectId = "fake-project";
+ private static final String instanceId = "fake-instance";
+ private static final String appProfileId = "default";
+ private static final String tableId = "fake-table";
+ private static final String zone = "us-east-1";
+ private static final String cluster = "cluster-1";
+
+ private static final String clientName = "fake-client-name";
+ private static final String taskId = "fake-task-id";
+
+ @Rule public final MockitoRule mockitoRule = MockitoJUnit.rule();
+
+ @Mock private MetricServiceStub mockMetricServiceStub;
+ private MetricServiceClient fakeMetricServiceClient;
+ private BigtableCloudMonitoringExporter exporter;
+
+ private Attributes attributes;
+ private Resource resource;
+ private InstrumentationScopeInfo scope;
+
+ @Before
+ public void setUp() {
+ fakeMetricServiceClient = new FakeMetricServiceClient(mockMetricServiceStub);
+
+ exporter =
+ new BigtableCloudMonitoringExporter(
+ projectId, fakeMetricServiceClient, /* applicationResource= */ null, taskId);
+
+ attributes =
+ Attributes.builder()
+ .put(BIGTABLE_PROJECT_ID_KEY, projectId)
+ .put(INSTANCE_ID_KEY, instanceId)
+ .put(TABLE_ID_KEY, tableId)
+ .put(CLUSTER_ID_KEY, cluster)
+ .put(ZONE_ID_KEY, zone)
+ .put(APP_PROFILE_KEY, appProfileId)
+ .build();
+
+ resource = Resource.create(Attributes.empty());
+
+ scope = InstrumentationScopeInfo.create(BuiltinMetricsConstants.METER_NAME);
+ }
+
+ @After
+ public void tearDown() {}
+
+ @Test
+ public void testExportingSumData() {
+ ArgumentCaptor<CreateTimeSeriesRequest> argumentCaptor =
+ ArgumentCaptor.forClass(CreateTimeSeriesRequest.class);
+
+ UnaryCallable<CreateTimeSeriesRequest, Empty> mockCallable = mock(UnaryCallable.class);
+ when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable);
+ ApiFuture<Empty> future = ApiFutures.immediateFuture(Empty.getDefaultInstance());
+ when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future);
+
+ long fakeValue = 11L;
+
+ long startEpoch = 10;
+ long endEpoch = 15;
+ LongPointData longPointData =
+ ImmutableLongPointData.create(startEpoch, endEpoch, attributes, fakeValue);
+
+ MetricData longData =
+ ImmutableMetricData.createLongSum(
+ resource,
+ scope,
+ "bigtable.googleapis.com/internal/client/retry_count",
+ "description",
+ "1",
+ ImmutableSumData.create(
+ true, AggregationTemporality.CUMULATIVE, ImmutableList.of(longPointData)));
+
+ exporter.export(Arrays.asList(longData));
+
+ CreateTimeSeriesRequest request = argumentCaptor.getValue();
+
+ assertThat(request.getTimeSeriesList()).hasSize(1);
+
+ TimeSeries timeSeries = request.getTimeSeriesList().get(0);
+
+ assertThat(timeSeries.getResource().getLabelsMap())
+ .containsExactly(
+ BIGTABLE_PROJECT_ID_KEY.getKey(), projectId,
+ INSTANCE_ID_KEY.getKey(), instanceId,
+ TABLE_ID_KEY.getKey(), tableId,
+ CLUSTER_ID_KEY.getKey(), cluster,
+ ZONE_ID_KEY.getKey(), zone);
+
+ assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2);
+ assertThat(timeSeries.getMetric().getLabelsMap())
+ .containsAtLeast(APP_PROFILE_KEY.getKey(), appProfileId, CLIENT_UID_KEY.getKey(), taskId);
+ assertThat(timeSeries.getPoints(0).getValue().getInt64Value()).isEqualTo(fakeValue);
+ assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos())
+ .isEqualTo(startEpoch);
+ assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch);
+ }
+
+ @Test
+ public void testExportingHistogramData() {
+ ArgumentCaptor<CreateTimeSeriesRequest> argumentCaptor =
+ ArgumentCaptor.forClass(CreateTimeSeriesRequest.class);
+
+ UnaryCallable<CreateTimeSeriesRequest, Empty> mockCallable = mock(UnaryCallable.class);
+ when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable);
+ ApiFuture<Empty> future = ApiFutures.immediateFuture(Empty.getDefaultInstance());
+ when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future);
+
+ long startEpoch = 10;
+ long endEpoch = 15;
+ HistogramPointData histogramPointData =
+ ImmutableHistogramPointData.create(
+ startEpoch,
+ endEpoch,
+ attributes,
+ 3d,
+ true,
+ 1d, // min
+ true,
+ 2d, // max
+ Arrays.asList(1.0),
+ Arrays.asList(1L, 2L));
+
+ MetricData histogramData =
+ ImmutableMetricData.createDoubleHistogram(
+ resource,
+ scope,
+ "bigtable.googleapis.com/internal/client/operation_latencies",
+ "description",
+ "ms",
+ ImmutableHistogramData.create(
+ AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData)));
+
+ exporter.export(Arrays.asList(histogramData));
+
+ CreateTimeSeriesRequest request = argumentCaptor.getValue();
+
+ assertThat(request.getTimeSeriesList()).hasSize(1);
+
+ TimeSeries timeSeries = request.getTimeSeriesList().get(0);
+
+ assertThat(timeSeries.getResource().getLabelsMap())
+ .containsExactly(
+ BIGTABLE_PROJECT_ID_KEY.getKey(), projectId,
+ INSTANCE_ID_KEY.getKey(), instanceId,
+ TABLE_ID_KEY.getKey(), tableId,
+ CLUSTER_ID_KEY.getKey(), cluster,
+ ZONE_ID_KEY.getKey(), zone);
+
+ assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2);
+ assertThat(timeSeries.getMetric().getLabelsMap())
+ .containsAtLeast(APP_PROFILE_KEY.getKey(), appProfileId, CLIENT_UID_KEY.getKey(), taskId);
+ Distribution distribution = timeSeries.getPoints(0).getValue().getDistributionValue();
+ assertThat(distribution.getCount()).isEqualTo(3);
+ assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos())
+ .isEqualTo(startEpoch);
+ assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch);
+ }
+
+ @Test
+ public void testTimeSeriesForMetricWithGceOrGkeResource() {
+ String gceProjectId = "fake-gce-project";
+ BigtableCloudMonitoringExporter exporter =
+ new BigtableCloudMonitoringExporter(
+ projectId,
+ fakeMetricServiceClient,
+ MonitoredResource.newBuilder()
+ .setType("gce-instance")
+ .putLabels("some-gce-key", "some-gce-value")
+ .putLabels("project_id", gceProjectId)
+ .build(),
+ taskId);
+ ArgumentCaptor<CreateTimeSeriesRequest> argumentCaptor =
+ ArgumentCaptor.forClass(CreateTimeSeriesRequest.class);
+
+ UnaryCallable<CreateTimeSeriesRequest, Empty> mockCallable = mock(UnaryCallable.class);
+ when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable);
+ ApiFuture<Empty> future = ApiFutures.immediateFuture(Empty.getDefaultInstance());
+ when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future);
+
+ long startEpoch = 10;
+ long endEpoch = 15;
+ HistogramPointData histogramPointData =
+ ImmutableHistogramPointData.create(
+ startEpoch,
+ endEpoch,
+ Attributes.of(
+ BIGTABLE_PROJECT_ID_KEY,
+ projectId,
+ INSTANCE_ID_KEY,
+ instanceId,
+ APP_PROFILE_KEY,
+ appProfileId,
+ CLIENT_NAME_KEY,
+ clientName),
+ 3d,
+ true,
+ 1d, // min
+ true,
+ 2d, // max
+ Arrays.asList(1.0),
+ Arrays.asList(1L, 2L));
+
+ MetricData histogramData =
+ ImmutableMetricData.createDoubleHistogram(
+ resource,
+ scope,
+ "bigtable.googleapis.com/internal/client/per_connection_error_count",
+ "description",
+ "ms",
+ ImmutableHistogramData.create(
+ AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData)));
+
+ exporter.export(Arrays.asList(histogramData));
+
+ CreateTimeSeriesRequest request = argumentCaptor.getValue();
+
+ assertThat(request.getName()).isEqualTo("projects/" + gceProjectId);
+ assertThat(request.getTimeSeriesList()).hasSize(1);
+
+ com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0);
+
+ assertThat(timeSeries.getResource().getLabelsMap())
+ .containsExactly("some-gce-key", "some-gce-value", "project_id", gceProjectId);
+
+ assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5);
+ assertThat(timeSeries.getMetric().getLabelsMap())
+ .containsAtLeast(
+ BIGTABLE_PROJECT_ID_KEY.getKey(),
+ projectId,
+ INSTANCE_ID_KEY.getKey(),
+ instanceId,
+ APP_PROFILE_KEY.getKey(),
+ appProfileId,
+ CLIENT_NAME_KEY.getKey(),
+ clientName,
+ CLIENT_UID_KEY.getKey(),
+ taskId);
+ }
+
+ private static class FakeMetricServiceClient extends MetricServiceClient {
+
+ protected FakeMetricServiceClient(MetricServiceStub stub) {
+ super(stub);
+ }
+ }
+}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java
index 5d16b623fd..a12dd3cfbd 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java
@@ -45,7 +45,6 @@
import com.google.cloud.bigtable.data.v2.models.SampleRowKeysRequest;
import com.google.cloud.bigtable.data.v2.models.TableId;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
-import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
import com.google.common.collect.ImmutableMap;
import io.grpc.ForwardingServerCall.SimpleForwardingServerCall;
import io.grpc.Metadata;
@@ -126,16 +125,21 @@ public void sendHeaders(Metadata headers) {
.setInstanceId(INSTANCE_ID)
.setAppProfileId(APP_PROFILE_ID)
.build();
- EnhancedBigtableStubSettings stubSettings =
- settings
- .getStubSettings()
+
+ ClientContext clientContext =
+ EnhancedBigtableStub.createClientContext(settings.getStubSettings());
+ clientContext =
+ clientContext
.toBuilder()
.setTracerFactory(
EnhancedBigtableStub.createBigtableTracerFactory(
- settings.getStubSettings(), Tags.getTagger(), localStats.getStatsRecorder()))
+ settings.getStubSettings(),
+ Tags.getTagger(),
+ localStats.getStatsRecorder(),
+ null))
.build();
- attempts = stubSettings.readRowsSettings().getRetrySettings().getMaxAttempts();
- stub = new EnhancedBigtableStub(stubSettings, ClientContext.create(stubSettings));
+ attempts = settings.getStubSettings().readRowsSettings().getRetrySettings().getMaxAttempts();
+ stub = new EnhancedBigtableStub(settings.getStubSettings(), clientContext);
// Create another server without injecting the server-timing header and another stub that
// connects to it.
@@ -147,18 +151,21 @@ public void sendHeaders(Metadata headers) {
.setInstanceId(INSTANCE_ID)
.setAppProfileId(APP_PROFILE_ID)
.build();
- EnhancedBigtableStubSettings noHeaderStubSettings =
- noHeaderSettings
- .getStubSettings()
+
+ ClientContext noHeaderClientContext =
+ EnhancedBigtableStub.createClientContext(noHeaderSettings.getStubSettings());
+ noHeaderClientContext =
+ noHeaderClientContext
.toBuilder()
.setTracerFactory(
EnhancedBigtableStub.createBigtableTracerFactory(
noHeaderSettings.getStubSettings(),
Tags.getTagger(),
- localStats.getStatsRecorder()))
+ localStats.getStatsRecorder(),
+ null))
.build();
noHeaderStub =
- new EnhancedBigtableStub(noHeaderStubSettings, ClientContext.create(noHeaderStubSettings));
+ new EnhancedBigtableStub(noHeaderSettings.getStubSettings(), noHeaderClientContext);
}
@After
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java
new file mode 100644
index 0000000000..09b7e1f663
--- /dev/null
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.api.core.InternalApi;
+import com.google.protobuf.Timestamp;
+import com.google.protobuf.util.Timestamps;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.metrics.data.HistogramPointData;
+import io.opentelemetry.sdk.metrics.data.LongPointData;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.Collectors;
+import org.junit.Assert;
+
+@InternalApi
+public class BuiltinMetricsTestUtils {
+
+ private BuiltinMetricsTestUtils() {}
+
+ public static MetricData getMetricData(Collection<MetricData> allMetricData, String metricName) {
+ List<MetricData> metricDataList =
+ allMetricData.stream()
+ .filter(md -> md.getName().equals(BuiltinMetricsConstants.METER_NAME + metricName))
+ .collect(Collectors.toList());
+ if (metricDataList.size() == 0) {
+ allMetricData.stream().forEach(md -> System.out.println(md.getName()));
+ }
+ assertThat(metricDataList.size()).isEqualTo(1);
+
+ return metricDataList.get(0);
+ }
+
+ public static long getAggregatedValue(MetricData metricData, Attributes attributes) {
+ switch (metricData.getType()) {
+ case HISTOGRAM:
+ HistogramPointData hd =
+ metricData.getHistogramData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList())
+ .get(0);
+ return (long) hd.getSum() / hd.getCount();
+ case LONG_SUM:
+ LongPointData ld =
+ metricData.getLongSumData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList())
+ .get(0);
+ return ld.getValue();
+ default:
+ return 0;
+ }
+ }
+
+ public static Timestamp getStartTimeSeconds(MetricData metricData, Attributes attributes) {
+ switch (metricData.getType()) {
+ case HISTOGRAM:
+ HistogramPointData hd =
+ metricData.getHistogramData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList())
+ .get(0);
+ return Timestamps.fromNanos(hd.getStartEpochNanos());
+ case LONG_SUM:
+ LongPointData ld =
+ metricData.getLongSumData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList())
+ .get(0);
+ return Timestamps.fromNanos(ld.getStartEpochNanos());
+ default:
+ return Timestamp.getDefaultInstance();
+ }
+ }
+
+ public static void verifyAttributes(MetricData metricData, Attributes attributes) {
+ switch (metricData.getType()) {
+ case HISTOGRAM:
+ List<HistogramPointData> hd =
+ metricData.getHistogramData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList());
+ assertThat(hd).isNotEmpty();
+ break;
+ case LONG_SUM:
+ List<LongPointData> ld =
+ metricData.getLongSumData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList());
+ assertThat(ld).isNotEmpty();
+ break;
+ default:
+ Assert.fail("Unexpected type");
+ }
+ }
+}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java
index 06b923cad3..2dd4bcabb3 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java
@@ -15,14 +15,24 @@
*/
package com.google.cloud.bigtable.data.v2.stub.metrics;
-import static com.google.api.gax.tracing.ApiTracerFactory.OperationType;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METHOD_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STATUS_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STREAMING_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getAggregatedValue;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getMetricData;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.verifyAttributes;
import static com.google.common.truth.Truth.assertThat;
-import static org.junit.Assert.assertThrows;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
import com.google.api.client.util.Lists;
import com.google.api.core.ApiFunction;
@@ -36,7 +46,6 @@
import com.google.api.gax.rpc.NotFoundException;
import com.google.api.gax.rpc.ResponseObserver;
import com.google.api.gax.rpc.StreamController;
-import com.google.api.gax.tracing.SpanName;
import com.google.bigtable.v2.BigtableGrpc;
import com.google.bigtable.v2.MutateRowRequest;
import com.google.bigtable.v2.MutateRowResponse;
@@ -45,6 +54,7 @@
import com.google.bigtable.v2.ReadRowsRequest;
import com.google.bigtable.v2.ReadRowsResponse;
import com.google.bigtable.v2.ResponseParams;
+import com.google.cloud.bigtable.Version;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.FakeServiceBuilder;
import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId;
@@ -52,9 +62,9 @@
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;
+import com.google.cloud.bigtable.data.v2.models.TableId;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
-import com.google.cloud.bigtable.stats.StatsRecorderWrapper;
import com.google.common.base.Stopwatch;
import com.google.common.collect.Range;
import com.google.protobuf.ByteString;
@@ -77,11 +87,21 @@
import io.grpc.StatusRuntimeException;
import io.grpc.stub.ServerCallStreamObserver;
import io.grpc.stub.StreamObserver;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.InstrumentSelector;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.View;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
import java.nio.charset.Charset;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
@@ -92,12 +112,8 @@
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Captor;
-import org.mockito.Mock;
import org.mockito.junit.MockitoJUnit;
import org.mockito.junit.MockitoRule;
-import org.mockito.stubbing.Answer;
import org.threeten.bp.Duration;
@RunWith(JUnit4.class)
@@ -105,8 +121,8 @@ public class BuiltinMetricsTracerTest {
private static final String PROJECT_ID = "fake-project";
private static final String INSTANCE_ID = "fake-instance";
private static final String APP_PROFILE_ID = "default";
- private static final String TABLE_ID = "fake-table";
- private static final String AUTHORIZED_VIEW_ID = "fake-authorized-view";
+ private static final String TABLE = "fake-table";
+
private static final String BAD_TABLE_ID = "non-exist-table";
private static final String ZONE = "us-west-1";
private static final String CLUSTER = "cluster-0";
@@ -114,6 +130,7 @@ public class BuiltinMetricsTracerTest {
private static final long SERVER_LATENCY = 100;
private static final long APPLICATION_LATENCY = 200;
private static final long SLEEP_VARIABILITY = 15;
+ private static final String CLIENT_NAME = "java-bigtable/" + Version.VERSION;
private static final long CHANNEL_BLOCKING_LATENCY = 75;
@@ -124,18 +141,35 @@ public class BuiltinMetricsTracerTest {
private EnhancedBigtableStub stub;
- @Mock private BuiltinMetricsTracerFactory mockFactory;
- @Mock private StatsRecorderWrapper statsRecorderWrapper;
+ private int batchElementCount = 2;
- @Captor private ArgumentCaptor status;
- @Captor private ArgumentCaptor tableId;
- @Captor private ArgumentCaptor zone;
- @Captor private ArgumentCaptor cluster;
+ private Attributes baseAttributes;
- private int batchElementCount = 2;
+ private InMemoryMetricReader metricReader;
@Before
public void setUp() throws Exception {
+ metricReader = InMemoryMetricReader.create();
+
+ baseAttributes =
+ Attributes.builder()
+ .put(BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY, PROJECT_ID)
+ .put(BuiltinMetricsConstants.INSTANCE_ID_KEY, INSTANCE_ID)
+ .put(BuiltinMetricsConstants.APP_PROFILE_KEY, APP_PROFILE_ID)
+ .build();
+
+ SdkMeterProviderBuilder meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(metricReader);
+
+ for (Map.Entry<InstrumentSelector, View> entry :
+ BuiltinMetricsConstants.getAllViews().entrySet()) {
+ meterProvider.registerView(entry.getKey(), entry.getValue());
+ }
+
+ OpenTelemetrySdk otel =
+ OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+ BuiltinMetricsTracerFactory factory = BuiltinMetricsTracerFactory.create(otel, baseAttributes);
+
+ // Add an interceptor to add server-timing in headers
+ ServerInterceptor trailersInterceptor =
+ new ServerInterceptor() {
@@ -216,7 +250,8 @@ public void sendMessage(ReqT message) {
.setMaxOutstandingRequestBytes(1001L)
.build())
.build());
- stubSettingsBuilder.setTracerFactory(mockFactory);
+
+ stubSettingsBuilder.setTracerFactory(factory);
InstantiatingGrpcChannelProvider.Builder channelProvider =
((InstantiatingGrpcChannelProvider) stubSettingsBuilder.getTransportChannelProvider())
@@ -247,117 +282,117 @@ public void tearDown() {
@Test
public void testReadRowsOperationLatencies() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
- ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class);
-
Stopwatch stopwatch = Stopwatch.createStarted();
- Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE_ID)).iterator());
+ Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE)).iterator());
long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS);
- verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture());
- // verify record operation is only called once
- verify(statsRecorderWrapper)
- .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(STREAMING_KEY, true)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
+
+ Collection<MetricData> allMetricData = metricReader.collectAllMetrics();
+
+ MetricData metricData = getMetricData(allMetricData, OPERATION_LATENCIES_NAME);
- assertThat(operationLatency.getValue()).isIn(Range.closed(SERVER_LATENCY, elapsed));
- assertThat(status.getAllValues()).containsExactly("OK");
- assertThat(tableId.getAllValues()).containsExactly(TABLE_ID);
- assertThat(zone.getAllValues()).containsExactly(ZONE);
- assertThat(cluster.getAllValues()).containsExactly(CLUSTER);
+ long value = getAggregatedValue(metricData, expectedAttributes);
+ assertThat(value).isIn(Range.closed(SERVER_LATENCY, elapsed));
}
@Test
public void testReadRowsOperationLatenciesOnAuthorizedView() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
- ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class);
-
+ String authorizedViewId = "test-authorized-view-id";
Stopwatch stopwatch = Stopwatch.createStarted();
Lists.newArrayList(
- stub.readRowsCallable()
- .call(Query.create(AuthorizedViewId.of(TABLE_ID, AUTHORIZED_VIEW_ID)))
- .iterator());
+ stub.readRowsCallable().call(Query.create(AuthorizedViewId.of(TABLE, authorizedViewId))));
long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS);
- verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture());
- // verify record operation is only called once
- verify(statsRecorderWrapper)
- .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(STREAMING_KEY, true)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
- assertThat(operationLatency.getValue()).isIn(Range.closed(SERVER_LATENCY, elapsed));
- assertThat(status.getAllValues()).containsExactly("OK");
- assertThat(tableId.getAllValues()).containsExactly(TABLE_ID);
- assertThat(zone.getAllValues()).containsExactly(ZONE);
- assertThat(cluster.getAllValues()).containsExactly(CLUSTER);
+ Collection<MetricData> allMetricData = metricReader.collectAllMetrics();
+
+ MetricData metricData = getMetricData(allMetricData, OPERATION_LATENCIES_NAME);
+ long value = getAggregatedValue(metricData, expectedAttributes);
+ assertThat(value).isIn(Range.closed(SERVER_LATENCY, elapsed));
}
@Test
public void testGfeMetrics() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
- ArgumentCaptor gfeLatency = ArgumentCaptor.forClass(Long.class);
- ArgumentCaptor gfeMissingHeaders = ArgumentCaptor.forClass(Long.class);
-
- Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE_ID)));
-
- // Verify record attempt are called multiple times
- verify(statsRecorderWrapper, times(fakeService.getAttemptCounter().get()))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
-
- // The request was retried and gfe latency is only recorded in the retry attempt
- verify(statsRecorderWrapper).putGfeLatencies(gfeLatency.capture());
- assertThat(gfeLatency.getValue()).isEqualTo(FAKE_SERVER_TIMING);
-
- // The first time the request was retried, it'll increment missing header counter
- verify(statsRecorderWrapper, times(fakeService.getAttemptCounter().get()))
- .putGfeMissingHeaders(gfeMissingHeaders.capture());
- assertThat(gfeMissingHeaders.getAllValues()).containsExactly(1L, 0L);
-
- assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "OK");
- assertThat(tableId.getAllValues()).containsExactly(TABLE_ID, TABLE_ID);
- assertThat(zone.getAllValues()).containsExactly("global", ZONE);
- assertThat(cluster.getAllValues()).containsExactly("unspecified", CLUSTER);
+ Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE)));
+
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .build();
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+
+ MetricData serverLatenciesMetricData = getMetricData(allMetricData, SERVER_LATENCIES_NAME);
+
+ long serverLatencies = getAggregatedValue(serverLatenciesMetricData, expectedAttributes);
+ assertThat(serverLatencies).isEqualTo(FAKE_SERVER_TIMING);
+
+ MetricData connectivityErrorCountMetricData =
+ getMetricData(allMetricData, CONNECTIVITY_ERROR_COUNT_NAME);
+ Attributes expected1 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "UNAVAILABLE")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, "global")
+ .put(CLUSTER_ID_KEY, "unspecified")
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
+ Attributes expected2 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
+
+ verifyAttributes(connectivityErrorCountMetricData, expected1);
+ verifyAttributes(connectivityErrorCountMetricData, expected2);
+
+ assertThat(getAggregatedValue(connectivityErrorCountMetricData, expected1)).isEqualTo(1);
+ assertThat(getAggregatedValue(connectivityErrorCountMetricData, expected2)).isEqualTo(0);
}
@Test
public void testReadRowsApplicationLatencyWithAutoFlowControl() throws Exception {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
-
- ArgumentCaptor applicationLatency = ArgumentCaptor.forClass(Long.class);
- ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class);
-
final SettableApiFuture future = SettableApiFuture.create();
final AtomicInteger counter = new AtomicInteger(0);
// For auto flow control, application latency is the time application spent in onResponse.
stub.readRowsCallable()
.call(
- Query.create(TABLE_ID),
+ Query.create(TABLE),
new ResponseObserver() {
@Override
public void onStart(StreamController streamController) {}
@@ -383,37 +418,38 @@ public void onComplete() {
});
future.get();
- verify(statsRecorderWrapper).putApplicationLatencies(applicationLatency.capture());
- verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture());
- verify(statsRecorderWrapper)
- .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
-
assertThat(counter.get()).isEqualTo(fakeService.getResponseCounter().get());
- // Thread.sleep might not sleep for the requested amount depending on the interrupt period
- // defined by the OS.
- // On linux this is ~1ms but on windows may be as high as 15-20ms.
- assertThat(applicationLatency.getValue())
- .isAtLeast((APPLICATION_LATENCY - SLEEP_VARIABILITY) * counter.get());
- assertThat(applicationLatency.getValue())
- .isAtMost(operationLatency.getValue() - SERVER_LATENCY);
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData applicationLatency =
+ getMetricData(allMetricData, APPLICATION_BLOCKING_LATENCIES_NAME);
+
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .build();
+ long value = getAggregatedValue(applicationLatency, expectedAttributes);
+
+ assertThat(value).isAtLeast((APPLICATION_LATENCY - SLEEP_VARIABILITY) * counter.get());
+
+ MetricData operationLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME);
+ long operationLatencyValue =
+ getAggregatedValue(
+ operationLatency,
+ expectedAttributes.toBuilder().put(STATUS_KEY, "OK").put(STREAMING_KEY, true).build());
+ assertThat(value).isAtMost(operationLatencyValue - SERVER_LATENCY);
}
@Test
public void testReadRowsApplicationLatencyWithManualFlowControl() throws Exception {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
-
- ArgumentCaptor applicationLatency = ArgumentCaptor.forClass(Long.class);
- ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class);
int counter = 0;
- Iterator rows = stub.readRowsCallable().call(Query.create(TABLE_ID)).iterator();
+ Iterator rows = stub.readRowsCallable().call(Query.create(TABLE)).iterator();
while (rows.hasNext()) {
counter++;
@@ -421,148 +457,189 @@ public void testReadRowsApplicationLatencyWithManualFlowControl() throws Excepti
rows.next();
}
- verify(statsRecorderWrapper).putApplicationLatencies(applicationLatency.capture());
- verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture());
- verify(statsRecorderWrapper)
- .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData applicationLatency =
+ getMetricData(allMetricData, APPLICATION_BLOCKING_LATENCIES_NAME);
+
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .build();
- // For manual flow control, the last application latency shouldn't count, because at that point
- // the server already sent back all the responses.
+ long value = getAggregatedValue(applicationLatency, expectedAttributes);
+ // For manual flow control, the last application latency shouldn't count, because at that
+ // point the server already sent back all the responses.
assertThat(counter).isEqualTo(fakeService.getResponseCounter().get());
- assertThat(applicationLatency.getValue())
- .isAtLeast(APPLICATION_LATENCY * (counter - 1) - SERVER_LATENCY);
- assertThat(applicationLatency.getValue())
- .isAtMost(operationLatency.getValue() - SERVER_LATENCY);
+ assertThat(value).isAtLeast(APPLICATION_LATENCY * (counter - 1) - SERVER_LATENCY);
+
+ MetricData operationLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME);
+ long operationLatencyValue =
+ getAggregatedValue(
+ operationLatency,
+ expectedAttributes.toBuilder().put(STATUS_KEY, "OK").put(STREAMING_KEY, true).build());
+ assertThat(value).isAtMost(operationLatencyValue - SERVER_LATENCY);
}
@Test
- public void testRetryCount() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "MutateRow"),
- statsRecorderWrapper));
-
- ArgumentCaptor retryCount = ArgumentCaptor.forClass(Integer.class);
-
+ public void testRetryCount() throws InterruptedException {
stub.mutateRowCallable()
- .call(RowMutation.create(TABLE_ID, "random-row").setCell("cf", "q", "value"));
-
- // In TracedUnaryCallable, we create a future and add a TraceFinisher to the callback. Main
- // thread is blocked on waiting for the future to be completed. When onComplete is called on
- // the grpc thread, the future is completed, however we might not have enough time for
- // TraceFinisher to run. Add a 1 second time out to wait for the callback. This shouldn't have
- // any impact on production code.
- verify(statsRecorderWrapper, timeout(1000)).putRetryCount(retryCount.capture());
+ .call(RowMutation.create(TABLE, "random-row").setCell("cf", "q", "value"));
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = getMetricData(allMetricData, RETRY_COUNT_NAME);
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(METHOD_KEY, "Bigtable.MutateRow")
+ .put(STATUS_KEY, "OK")
+ .build();
- assertThat(retryCount.getValue()).isEqualTo(fakeService.getAttemptCounter().get() - 1);
+ long value = getAggregatedValue(metricData, expectedAttributes);
+ assertThat(value).isEqualTo(fakeService.getAttemptCounter().get() - 1);
}
@Test
public void testMutateRowAttemptsTagValues() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.Unary, SpanName.of("Bigtable", "MutateRow"), statsRecorderWrapper));
-
stub.mutateRowCallable()
- .call(RowMutation.create(TABLE_ID, "random-row").setCell("cf", "q", "value"));
-
- // Set a timeout to reduce flakiness of this test. BasicRetryingFuture will set
- // attempt succeeded and set the response which will call complete() in AbstractFuture which
- // calls releaseWaiters(). onOperationComplete() is called in TracerFinisher which will be
- // called after the mutateRow call is returned. So there's a race between when the call returns
- // and when the record() is called in onOperationCompletion().
- verify(statsRecorderWrapper, timeout(50).times(fakeService.getAttemptCounter().get()))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
- assertThat(zone.getAllValues()).containsExactly("global", "global", ZONE);
- assertThat(cluster.getAllValues()).containsExactly("unspecified", "unspecified", CLUSTER);
- assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "UNAVAILABLE", "OK");
- assertThat(tableId.getAllValues()).containsExactly(TABLE_ID, TABLE_ID, TABLE_ID);
+ .call(RowMutation.create(TABLE, "random-row").setCell("cf", "q", "value"));
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME);
+
+ Attributes expected1 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "UNAVAILABLE")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, "global")
+ .put(CLUSTER_ID_KEY, "unspecified")
+ .put(METHOD_KEY, "Bigtable.MutateRow")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, false)
+ .build();
+
+ Attributes expected2 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.MutateRow")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, false)
+ .build();
+
+ verifyAttributes(metricData, expected1);
+ verifyAttributes(metricData, expected2);
}
@Test
public void testMutateRowsPartialError() throws InterruptedException {
+ Batcher batcher = stub.newMutateRowsBatcher(TableId.of(TABLE), null);
int numMutations = 6;
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper));
-
- Batcher batcher = stub.newMutateRowsBatcher(TABLE_ID, null);
for (int i = 0; i < numMutations; i++) {
String key = i % 2 == 0 ? "key" : "fail-key";
batcher.add(RowMutationEntry.create(key).setCell("f", "q", "v"));
}
- assertThrows(BatchingException.class, () -> batcher.close());
-
- int expectedNumRequests = numMutations / batchElementCount;
- verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Assert.assertThrows(BatchingException.class, batcher::close);
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME);
+
+ Attributes expected =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.MutateRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, false)
+ .build();
- assertThat(zone.getAllValues()).containsExactly(ZONE, ZONE, ZONE);
- assertThat(cluster.getAllValues()).containsExactly(CLUSTER, CLUSTER, CLUSTER);
- assertThat(status.getAllValues()).containsExactly("OK", "OK", "OK");
+ verifyAttributes(metricData, expected);
}
@Test
public void testMutateRowsRpcError() {
+ Batcher batcher =
+ stub.newMutateRowsBatcher(TableId.of(BAD_TABLE_ID), null);
int numMutations = 6;
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper));
-
- Batcher batcher = stub.newMutateRowsBatcher(BAD_TABLE_ID, null);
for (int i = 0; i < numMutations; i++) {
- batcher.add(RowMutationEntry.create("key").setCell("f", "q", "v"));
+ String key = i % 2 == 0 ? "key" : "fail-key";
+ batcher.add(RowMutationEntry.create(key).setCell("f", "q", "v"));
}
- assertThrows(BatchingException.class, () -> batcher.close());
-
- int expectedNumRequests = numMutations / batchElementCount;
- verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Assert.assertThrows(BatchingException.class, batcher::close);
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME);
+
+ Attributes expected =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "NOT_FOUND")
+ .put(TABLE_ID_KEY, BAD_TABLE_ID)
+ .put(ZONE_ID_KEY, "global")
+ .put(CLUSTER_ID_KEY, "unspecified")
+ .put(METHOD_KEY, "Bigtable.MutateRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, false)
+ .build();
- assertThat(zone.getAllValues()).containsExactly("global", "global", "global");
- assertThat(cluster.getAllValues()).containsExactly("unspecified", "unspecified", "unspecified");
- assertThat(status.getAllValues()).containsExactly("NOT_FOUND", "NOT_FOUND", "NOT_FOUND");
+ verifyAttributes(metricData, expected);
}
@Test
public void testReadRowsAttemptsTagValues() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
-
Lists.newArrayList(stub.readRowsCallable().call(Query.create("fake-table")).iterator());
- // Set a timeout to reduce flakiness of this test. BasicRetryingFuture will set
- // attempt succeeded and set the response which will call complete() in AbstractFuture which
- // calls releaseWaiters(). onOperationComplete() is called in TracerFinisher which will be
- // called after the mutateRow call is returned. So there's a race between when the call returns
- // and when the record() is called in onOperationCompletion().
- verify(statsRecorderWrapper, timeout(50).times(fakeService.getAttemptCounter().get()))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
- assertThat(zone.getAllValues()).containsExactly("global", ZONE);
- assertThat(cluster.getAllValues()).containsExactly("unspecified", CLUSTER);
- assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "OK");
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME);
+
+ Attributes expected1 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "UNAVAILABLE")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, "global")
+ .put(CLUSTER_ID_KEY, "unspecified")
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, true)
+ .build();
+
+ Attributes expected2 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, true)
+ .build();
+
+ verifyAttributes(metricData, expected1);
+ verifyAttributes(metricData, expected2);
}
@Test
public void testBatchBlockingLatencies() throws InterruptedException {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper));
- try (Batcher batcher = stub.newMutateRowsBatcher(TABLE_ID, null)) {
+ try (Batcher batcher = stub.newMutateRowsBatcher(TABLE, null)) {
for (int i = 0; i < 6; i++) {
batcher.add(RowMutationEntry.create("key").setCell("f", "q", "v"));
}
@@ -571,86 +648,100 @@ public void testBatchBlockingLatencies() throws InterruptedException {
batcher.close();
int expectedNumRequests = 6 / batchElementCount;
- ArgumentCaptor throttledTime = ArgumentCaptor.forClass(Long.class);
- verify(statsRecorderWrapper, timeout(1000).times(expectedNumRequests))
- .putClientBlockingLatencies(throttledTime.capture());
- // After the first request is sent, batcher will block on add because of the server latency.
- // Blocking latency should be around server latency.
- assertThat(throttledTime.getAllValues().get(1)).isAtLeast(SERVER_LATENCY - 10);
- assertThat(throttledTime.getAllValues().get(2)).isAtLeast(SERVER_LATENCY - 10);
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData applicationLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME);
- verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.MutateRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
- assertThat(zone.getAllValues()).containsExactly(ZONE, ZONE, ZONE);
- assertThat(cluster.getAllValues()).containsExactly(CLUSTER, CLUSTER, CLUSTER);
+ long value = getAggregatedValue(applicationLatency, expectedAttributes);
+ // After the first request is sent, batcher will block on add because of the server latency.
+ // Blocking latency should be around server latency. So each data point would be at least
+ // (SERVER_LATENCY - 10).
+ long expected = (SERVER_LATENCY - 10) * (expectedNumRequests - 1) / expectedNumRequests;
+ assertThat(value).isAtLeast(expected);
}
}
@Test
- public void testQueuedOnChannelServerStreamLatencies() throws InterruptedException {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
-
- stub.readRowsCallable().all().call(Query.create(TABLE_ID));
-
- ArgumentCaptor blockedTime = ArgumentCaptor.forClass(Long.class);
-
- verify(statsRecorderWrapper, timeout(1000).times(fakeService.attemptCounter.get()))
- .putClientBlockingLatencies(blockedTime.capture());
+ public void testQueuedOnChannelServerStreamLatencies() {
+ stub.readRowsCallable().all().call(Query.create(TABLE));
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData clientLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME);
+
+ Attributes attributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
- assertThat(blockedTime.getAllValues().get(1)).isAtLeast(CHANNEL_BLOCKING_LATENCY);
+ long value = getAggregatedValue(clientLatency, attributes);
+ assertThat(value).isAtLeast(CHANNEL_BLOCKING_LATENCY);
}
@Test
- public void testQueuedOnChannelUnaryLatencies() throws InterruptedException {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.Unary, SpanName.of("Bigtable", "MutateRow"), statsRecorderWrapper));
- stub.mutateRowCallable().call(RowMutation.create(TABLE_ID, "a-key").setCell("f", "q", "v"));
+ public void testQueuedOnChannelUnaryLatencies() {
- ArgumentCaptor blockedTime = ArgumentCaptor.forClass(Long.class);
+ stub.mutateRowCallable().call(RowMutation.create(TABLE, "a-key").setCell("f", "q", "v"));
- verify(statsRecorderWrapper, timeout(1000).times(fakeService.attemptCounter.get()))
- .putClientBlockingLatencies(blockedTime.capture());
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData clientLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME);
- assertThat(blockedTime.getAllValues().get(1)).isAtLeast(CHANNEL_BLOCKING_LATENCY);
- assertThat(blockedTime.getAllValues().get(2)).isAtLeast(CHANNEL_BLOCKING_LATENCY);
+ Attributes attributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(METHOD_KEY, "Bigtable.MutateRow")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
+
+ long expected = CHANNEL_BLOCKING_LATENCY * 2 / 3;
+ long actual = getAggregatedValue(clientLatency, attributes);
+ assertThat(actual).isAtLeast(expected);
}
@Test
public void testPermanentFailure() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
-
try {
Lists.newArrayList(stub.readRowsCallable().call(Query.create(BAD_TABLE_ID)).iterator());
Assert.fail("Request should throw not found error");
} catch (NotFoundException e) {
}
- ArgumentCaptor attemptLatency = ArgumentCaptor.forClass(Long.class);
- ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class);
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData attemptLatency = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME);
+
+ Attributes expected =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "NOT_FOUND")
+ .put(TABLE_ID_KEY, BAD_TABLE_ID)
+ .put(CLUSTER_ID_KEY, "unspecified")
+ .put(ZONE_ID_KEY, "global")
+ .put(STREAMING_KEY, true)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
- verify(statsRecorderWrapper, timeout(50)).putAttemptLatencies(attemptLatency.capture());
- verify(statsRecorderWrapper, timeout(50)).putOperationLatencies(operationLatency.capture());
- verify(statsRecorderWrapper, timeout(50))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ verifyAttributes(attemptLatency, expected);
- assertThat(status.getValue()).isEqualTo("NOT_FOUND");
- assertThat(tableId.getValue()).isEqualTo(BAD_TABLE_ID);
- assertThat(cluster.getValue()).isEqualTo("unspecified");
- assertThat(zone.getValue()).isEqualTo("global");
+ MetricData opLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME);
+ verifyAttributes(opLatency, expected);
}
private static class FakeService extends BigtableGrpc.BigtableImplBase {
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java
index a6670182b8..4ab19a5337 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java
@@ -23,17 +23,29 @@
import com.google.api.gax.grpc.ChannelPoolSettings;
import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider;
import com.google.bigtable.v2.*;
+import com.google.cloud.bigtable.Version;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.FakeServiceBuilder;
import com.google.cloud.bigtable.data.v2.models.*;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
-import com.google.cloud.bigtable.stats.StatsRecorderWrapperForConnection;
import io.grpc.Server;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.grpc.stub.StreamObserver;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.InstrumentSelector;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.View;
+import io.opentelemetry.sdk.metrics.data.HistogramPointData;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
+import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.ScheduledExecutorService;
import org.junit.After;
import org.junit.Before;
@@ -51,25 +63,50 @@ public class ErrorCountPerConnectionTest {
private final FakeService fakeService = new FakeService();
private EnhancedBigtableStubSettings.Builder builder;
private ArgumentCaptor runnableCaptor;
- private StatsRecorderWrapperForConnection statsRecorderWrapperForConnection;
+
+ private InMemoryMetricReader metricReader;
+
+ private Attributes attributes;
@Before
public void setup() throws Exception {
server = FakeServiceBuilder.create(fakeService).start();
ScheduledExecutorService executors = Mockito.mock(ScheduledExecutorService.class);
+
+ attributes =
+ Attributes.builder()
+ .put(BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY, "fake-project")
+ .put(BuiltinMetricsConstants.INSTANCE_ID_KEY, "fake-instance")
+ .put(BuiltinMetricsConstants.APP_PROFILE_KEY, "")
+ .put(BuiltinMetricsConstants.CLIENT_NAME_KEY, "bigtable-java/" + Version.VERSION)
+ .build();
+
+ metricReader = InMemoryMetricReader.create();
+
+ SdkMeterProviderBuilder meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(metricReader);
+
+ for (Map.Entry entry :
+ BuiltinMetricsConstants.getAllViews().entrySet()) {
+ meterProvider.registerView(entry.getKey(), entry.getValue());
+ }
+
+ OpenTelemetrySdk otel =
+ OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+
builder =
BigtableDataSettings.newBuilderForEmulator(server.getPort())
.stubSettings()
.setBackgroundExecutorProvider(FixedExecutorProvider.create(executors))
.setProjectId("fake-project")
- .setInstanceId("fake-instance");
+ .setInstanceId("fake-instance")
+ .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(otel));
+
runnableCaptor = ArgumentCaptor.forClass(Runnable.class);
Mockito.when(
executors.scheduleAtFixedRate(runnableCaptor.capture(), anyLong(), anyLong(), any()))
.thenReturn(null);
-
- statsRecorderWrapperForConnection = Mockito.mock(StatsRecorderWrapperForConnection.class);
}
@After
@@ -98,14 +135,21 @@ public void readWithOneChannel() throws Exception {
// noop
}
}
- ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
+
runInterceptorTasksAndAssertCount();
- List allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts.size()).isEqualTo(1);
- assertThat(allErrorCounts.get(0)).isEqualTo(errorCount);
+
+ Collection allMetrics = metricReader.collectAllMetrics();
+ MetricData metricData =
+ BuiltinMetricsTestUtils.getMetricData(
+ allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME);
+
+ // Make sure the correct bucket is updated with the correct number of data points
+ ArrayList histogramPointData =
+ new ArrayList<>(metricData.getHistogramData().getPoints());
+ assertThat(histogramPointData.size()).isEqualTo(1);
+ HistogramPointData point = histogramPointData.get(0);
+ int index = findDataPointIndex(point.getBoundaries(), errorCount);
+ assertThat(point.getCounts().get(index)).isEqualTo(1);
}
@Test
@@ -131,28 +175,35 @@ public void readWithTwoChannels() throws Exception {
// noop
}
}
- ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
runInterceptorTasksAndAssertCount();
- List allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts.size()).isEqualTo(2);
- // Requests get assigned to channels using a Round Robin algorithm, so half to each.
- assertThat(allErrorCounts).containsExactly(totalErrorCount / 2, totalErrorCount / 2);
+ long errorCountPerChannel = totalErrorCount / 2;
+
+ Collection allMetrics = metricReader.collectAllMetrics();
+ MetricData metricData =
+ BuiltinMetricsTestUtils.getMetricData(
+ allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME);
+
+ // The 2 channels should get equal amount of errors, so the totalErrorCount / 2 bucket is
+ // updated twice.
+ ArrayList histogramPointData =
+ new ArrayList<>(metricData.getHistogramData().getPoints());
+ assertThat(histogramPointData.size()).isEqualTo(1);
+ HistogramPointData point = histogramPointData.get(0);
+ int index = findDataPointIndex(point.getBoundaries(), errorCountPerChannel);
+ assertThat(point.getCounts().get(index)).isEqualTo(2);
}
@Test
public void readOverTwoPeriods() throws Exception {
EnhancedBigtableStub stub = EnhancedBigtableStub.create(builder.build());
- long errorCount = 0;
+ long errorCount1 = 0;
for (int i = 0; i < 20; i++) {
Query query;
if (i % 3 == 0) {
query = Query.create(ERROR_TABLE_NAME);
- errorCount += 1;
+ errorCount1 += 1;
} else {
query = Query.create(SUCCESS_TABLE_NAME);
}
@@ -162,16 +213,9 @@ public void readOverTwoPeriods() throws Exception {
// noop
}
}
- ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
- runInterceptorTasksAndAssertCount();
- List allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts.size()).isEqualTo(1);
- assertThat(allErrorCounts.get(0)).isEqualTo(errorCount);
- errorCount = 0;
+ runInterceptorTasksAndAssertCount();
+ long errorCount2 = 0;
for (int i = 0; i < 20; i++) {
Query query;
@@ -179,7 +223,7 @@ public void readOverTwoPeriods() throws Exception {
query = Query.create(SUCCESS_TABLE_NAME);
} else {
query = Query.create(ERROR_TABLE_NAME);
- errorCount += 1;
+ errorCount2 += 1;
}
try {
stub.readRowsCallable().call(query).iterator().hasNext();
@@ -187,27 +231,22 @@ public void readOverTwoPeriods() throws Exception {
// noop
}
}
- errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
+
runInterceptorTasksAndAssertCount();
- allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts.size()).isEqualTo(1);
- assertThat(allErrorCounts.get(0)).isEqualTo(errorCount);
- }
- @Test
- public void ignoreInactiveConnection() throws Exception {
- EnhancedBigtableStub stub = EnhancedBigtableStub.create(builder.build());
+ Collection<MetricData> allMetrics = metricReader.collectAllMetrics();
+ MetricData metricData =
+ BuiltinMetricsTestUtils.getMetricData(
+ allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME);
- ArgumentCaptor<Long> errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
- runInterceptorTasksAndAssertCount();
- List<Long> allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts).isEmpty();
+ ArrayList<HistogramPointData> histogramPointData =
+ new ArrayList<>(metricData.getHistogramData().getPoints());
+ assertThat(histogramPointData.size()).isEqualTo(1);
+ HistogramPointData point = histogramPointData.get(0);
+ int index1 = findDataPointIndex(point.getBoundaries(), errorCount1);
+ int index2 = findDataPointIndex(point.getBoundaries(), errorCount2);
+ assertThat(point.getCounts().get(index1)).isEqualTo(1);
+ assertThat(point.getCounts().get(index2)).isEqualTo(1);
}
@Test
@@ -221,22 +260,19 @@ public void noFailedRequests() throws Exception {
// noop
}
}
- ArgumentCaptor<Long> errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
runInterceptorTasksAndAssertCount();
- List<Long> allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts.size()).isEqualTo(1);
- assertThat(allErrorCounts.get(0)).isEqualTo(0);
+ Collection<MetricData> allMetrics = metricReader.collectAllMetrics();
+ MetricData metricData =
+ BuiltinMetricsTestUtils.getMetricData(
+ allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME);
+ long value = BuiltinMetricsTestUtils.getAggregatedValue(metricData, attributes);
+ assertThat(value).isEqualTo(0);
}
private void runInterceptorTasksAndAssertCount() {
int actualNumOfTasks = 0;
for (Runnable runnable : runnableCaptor.getAllValues()) {
if (runnable instanceof ErrorCountPerConnectionMetricTracker) {
- ((ErrorCountPerConnectionMetricTracker) runnable)
- .setStatsRecorderWrapperForConnection(statsRecorderWrapperForConnection);
runnable.run();
actualNumOfTasks++;
}
@@ -244,6 +280,16 @@ private void runInterceptorTasksAndAssertCount() {
assertThat(actualNumOfTasks).isEqualTo(1);
}
+ private int findDataPointIndex(List<Double> boundaries, long dataPoint) {
+ int index = 0;
+ for (; index < boundaries.size(); index++) {
+ if (boundaries.get(index) >= dataPoint) {
+ break;
+ }
+ }
+ return index;
+ }
+
static class FakeService extends BigtableGrpc.BigtableImplBase {
@Override
public void readRows(
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java
index 15bd9171f0..d72eac4056 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java
@@ -39,7 +39,6 @@
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
-import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor;
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableMap;
@@ -120,15 +119,20 @@ public void setUp() throws Exception {
.setInstanceId(INSTANCE_ID)
.setAppProfileId(APP_PROFILE_ID)
.build();
- EnhancedBigtableStubSettings stubSettings =
- settings
- .getStubSettings()
+
+ ClientContext clientContext =
+ EnhancedBigtableStub.createClientContext(settings.getStubSettings());
+ clientContext =
+ clientContext
.toBuilder()
.setTracerFactory(
EnhancedBigtableStub.createBigtableTracerFactory(
- settings.getStubSettings(), Tags.getTagger(), localStats.getStatsRecorder()))
+ settings.getStubSettings(),
+ Tags.getTagger(),
+ localStats.getStatsRecorder(),
+ null))
.build();
- stub = new EnhancedBigtableStub(stubSettings, ClientContext.create(stubSettings));
+ stub = new EnhancedBigtableStub(settings.getStubSettings(), clientContext);
}
@After
diff --git a/pom.xml b/pom.xml
index f193e7e852..c1c9404fa0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -347,22 +347,6 @@
-
-
-
- with-shaded
-
-
- !skip-shaded
-
-
-
- google-cloud-bigtable-stats
-
-