<groupId>com.google.truth</groupId>
<artifactId>truth</artifactId>
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java
index e9befe0974..257406eb45 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java
@@ -20,6 +20,6 @@
@InternalApi("For internal use only")
public final class Version {
// {x-version-update-start:google-cloud-bigtable:current}
- public static String VERSION = "2.37.0";
+ public static String VERSION = "2.38.0";
// {x-version-update-end}
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java
index c35500a189..9b2f2e345f 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java
@@ -19,7 +19,10 @@
import com.google.api.gax.core.BackgroundResource;
import com.google.api.gax.rpc.ClientContext;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
+import io.opentelemetry.api.OpenTelemetry;
import java.io.IOException;
+import java.util.logging.Level;
+import java.util.logging.Logger;
import javax.annotation.Nonnull;
/**
@@ -62,8 +65,12 @@
*/
@BetaApi("This feature is currently experimental and can change in the future")
public final class BigtableDataClientFactory implements AutoCloseable {
+
+ private static final Logger logger = Logger.getLogger(BigtableDataClientFactory.class.getName());
+
private final BigtableDataSettings defaultSettings;
private final ClientContext sharedClientContext;
+ private final OpenTelemetry openTelemetry;
/**
* Create a instance of this factory.
@@ -75,13 +82,28 @@ public static BigtableDataClientFactory create(BigtableDataSettings defaultSetti
throws IOException {
ClientContext sharedClientContext =
EnhancedBigtableStub.createClientContext(defaultSettings.getStubSettings());
- return new BigtableDataClientFactory(sharedClientContext, defaultSettings);
+ OpenTelemetry openTelemetry = null;
+ try {
+ // We don't want client side metrics to crash the client, so catch any exception when getting
+ // the OTEL instance and log the exception instead.
+ openTelemetry =
+ EnhancedBigtableStub.getOpenTelemetry(
+ defaultSettings.getProjectId(),
+ defaultSettings.getMetricsProvider(),
+ sharedClientContext.getCredentials());
+ } catch (Throwable t) {
+ logger.log(Level.WARNING, "Failed to get OTEL, will skip exporting client side metrics", t);
+ }
+ return new BigtableDataClientFactory(sharedClientContext, defaultSettings, openTelemetry);
}
private BigtableDataClientFactory(
- ClientContext sharedClientContext, BigtableDataSettings defaultSettings) {
+ ClientContext sharedClientContext,
+ BigtableDataSettings defaultSettings,
+ OpenTelemetry openTelemetry) {
this.sharedClientContext = sharedClientContext;
this.defaultSettings = defaultSettings;
+ this.openTelemetry = openTelemetry;
}
/**
@@ -112,7 +134,7 @@ public BigtableDataClient createDefault() {
.toBuilder()
.setTracerFactory(
EnhancedBigtableStub.createBigtableTracerFactory(
- defaultSettings.getStubSettings()))
+ defaultSettings.getStubSettings(), openTelemetry))
.build();
return BigtableDataClient.createWithClientContext(defaultSettings, clientContext);
@@ -140,7 +162,8 @@ public BigtableDataClient createForAppProfile(@Nonnull String appProfileId) thro
sharedClientContext
.toBuilder()
.setTracerFactory(
- EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings()))
+ EnhancedBigtableStub.createBigtableTracerFactory(
+ settings.getStubSettings(), openTelemetry))
.build();
return BigtableDataClient.createWithClientContext(settings, clientContext);
}
@@ -168,7 +191,8 @@ public BigtableDataClient createForInstance(@Nonnull String projectId, @Nonnull
sharedClientContext
.toBuilder()
.setTracerFactory(
- EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings()))
+ EnhancedBigtableStub.createBigtableTracerFactory(
+ settings.getStubSettings(), openTelemetry))
.build();
return BigtableDataClient.createWithClientContext(settings, clientContext);
@@ -197,7 +221,8 @@ public BigtableDataClient createForInstance(
sharedClientContext
.toBuilder()
.setTracerFactory(
- EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings()))
+ EnhancedBigtableStub.createBigtableTracerFactory(
+ settings.getStubSettings(), openTelemetry))
.build();
return BigtableDataClient.createWithClientContext(settings, clientContext);
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java
index 701a5e8e49..928159aa6d 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java
@@ -25,19 +25,16 @@
import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider;
import com.google.api.gax.rpc.UnaryCallSettings;
import com.google.auth.Credentials;
-import com.google.auth.oauth2.GoogleCredentials;
import com.google.cloud.bigtable.data.v2.models.Query;
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.stub.BigtableBatchingCallSettings;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
-import com.google.cloud.bigtable.stats.BigtableStackdriverStatsExporter;
-import com.google.cloud.bigtable.stats.BuiltinViews;
+import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider;
import com.google.common.base.MoreObjects;
import com.google.common.base.Strings;
import io.grpc.ManagedChannelBuilder;
import java.io.IOException;
import java.util.List;
-import java.util.concurrent.atomic.AtomicBoolean;
import java.util.logging.Logger;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
@@ -77,7 +74,10 @@ public final class BigtableDataSettings {
private static final Logger LOGGER = Logger.getLogger(BigtableDataSettings.class.getName());
private static final String BIGTABLE_EMULATOR_HOST_ENV_VAR = "BIGTABLE_EMULATOR_HOST";
- private static final AtomicBoolean BUILTIN_METRICS_REGISTERED = new AtomicBoolean(false);
+ // This is the legacy credential override used in the deprecated enableBuiltinMetrics method to
+ // override the default credentials set on the Bigtable client. Keeping it for backward
+ // compatibility.
+ @Deprecated @Nullable private static Credentials legacyMetricCredentialOverride;
private final EnhancedBigtableStubSettings stubSettings;
@@ -197,23 +197,34 @@ public static void enableGfeOpenCensusStats() {
com.google.cloud.bigtable.data.v2.stub.metrics.RpcViews.registerBigtableClientGfeViews();
}
- /** Register built in metrics. */
- public static void enableBuiltinMetrics() throws IOException {
- if (BUILTIN_METRICS_REGISTERED.compareAndSet(false, true)) {
- BuiltinViews.registerBigtableBuiltinViews();
- BigtableStackdriverStatsExporter.register(GoogleCredentials.getApplicationDefault());
- }
- }
+ /**
+ * Register built in metrics.
+ *
+ * @deprecated This is a no-op that doesn't do anything. Builtin metrics are enabled by default
+ * now. Please refer to {@link
+ * BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)} on how to enable or
+ * disable built-in metrics.
+ */
+ @Deprecated
+ public static void enableBuiltinMetrics() throws IOException {}
/**
* Register built in metrics with credentials. The credentials need to have metric write access
* for all the projects you're publishing to.
+ *
+ * @deprecated This is a no-op that doesn't do anything. Builtin metrics are enabled by default
+ * now. Please refer {@link BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)}
+ * on how to enable or disable built-in metrics.
*/
+ @Deprecated
public static void enableBuiltinMetrics(Credentials credentials) throws IOException {
- if (BUILTIN_METRICS_REGISTERED.compareAndSet(false, true)) {
- BuiltinViews.registerBigtableBuiltinViews();
- BigtableStackdriverStatsExporter.register(credentials);
- }
+ BigtableDataSettings.legacyMetricCredentialOverride = credentials;
+ }
+
+ /** Get the metrics credentials if it's set by {@link #enableBuiltinMetrics(Credentials)}. */
+ @InternalApi
+ public static Credentials getMetricsCredentials() {
+ return legacyMetricCredentialOverride;
}
/** Returns the target project id. */
@@ -278,6 +289,11 @@ public boolean isBulkMutationFlowControlEnabled() {
return stubSettings.bulkMutateRowsSettings().isServerInitiatedFlowControlEnabled();
}
+ /** Gets the {@link MetricsProvider}. * */
+ public MetricsProvider getMetricsProvider() {
+ return stubSettings.getMetricsProvider();
+ }
+
/** Returns the underlying RPC settings. */
public EnhancedBigtableStubSettings getStubSettings() {
return stubSettings;
@@ -527,6 +543,30 @@ public boolean isBulkMutationFlowControlEnabled() {
return stubSettings.bulkMutateRowsSettings().isServerInitiatedFlowControlEnabled();
}
+ /**
+ * Sets the {@link MetricsProvider}.
+ *
+ * <p>By default, this is set to {@link
+ * com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider#INSTANCE} which will
+ * collect and export client side metrics.
+ *
+ *
+ * <p>To disable client side metrics, set it to {@link
+ * com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider#INSTANCE}.
+ *
+ *
+ * <p>To use a custom OpenTelemetry instance, refer to {@link
+ * com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider} on how to
+ * set it up.
+ */
+ public Builder setMetricsProvider(MetricsProvider metricsProvider) {
+ stubSettings.setMetricsProvider(metricsProvider);
+ return this;
+ }
+
+ /** Gets the {@link MetricsProvider}. */
+ public MetricsProvider getMetricsProvider() {
+ return stubSettings.getMetricsProvider();
+ }
+
/**
* Returns the underlying settings for making RPC calls. The settings should be changed with
* care.
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java
index ec15c4131a..57d9748cca 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java
@@ -15,6 +15,11 @@
*/
package com.google.cloud.bigtable.data.v2.stub;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APP_PROFILE_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY;
+
import com.google.api.core.ApiFunction;
import com.google.api.core.BetaApi;
import com.google.api.core.InternalApi;
@@ -68,6 +73,7 @@
import com.google.bigtable.v2.RowRange;
import com.google.bigtable.v2.SampleRowKeysResponse;
import com.google.cloud.bigtable.Version;
+import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.internal.JwtCredentialsWithAudience;
import com.google.cloud.bigtable.data.v2.internal.NameUtil;
import com.google.cloud.bigtable.data.v2.internal.RequestContext;
@@ -97,8 +103,12 @@
import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracerUnaryCallable;
import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTracerFactory;
import com.google.cloud.bigtable.data.v2.stub.metrics.CompositeTracerFactory;
+import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
+import com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider;
import com.google.cloud.bigtable.data.v2.stub.metrics.ErrorCountPerConnectionMetricTracker;
+import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider;
import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsTracerFactory;
+import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider;
import com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants;
import com.google.cloud.bigtable.data.v2.stub.metrics.StatsHeadersServerStreamingCallable;
import com.google.cloud.bigtable.data.v2.stub.metrics.StatsHeadersUnaryCallable;
@@ -130,6 +140,8 @@
import io.opencensus.tags.TagValue;
import io.opencensus.tags.Tagger;
import io.opencensus.tags.Tags;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.common.Attributes;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
@@ -137,6 +149,8 @@
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
+import java.util.logging.Level;
+import java.util.logging.Logger;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
@@ -154,6 +168,9 @@
*/
@InternalApi
public class EnhancedBigtableStub implements AutoCloseable {
+
+ private static final Logger logger = Logger.getLogger(EnhancedBigtableStub.class.getName());
+
private static final String CLIENT_NAME = "Bigtable";
private static final long FLOW_CONTROL_ADJUSTING_INTERVAL_MS = TimeUnit.SECONDS.toMillis(20);
private final EnhancedBigtableStubSettings settings;
@@ -185,10 +202,25 @@ public class EnhancedBigtableStub implements AutoCloseable {
public static EnhancedBigtableStub create(EnhancedBigtableStubSettings settings)
throws IOException {
- settings = settings.toBuilder().setTracerFactory(createBigtableTracerFactory(settings)).build();
ClientContext clientContext = createClientContext(settings);
-
- return new EnhancedBigtableStub(settings, clientContext);
+ OpenTelemetry openTelemetry = null;
+ try {
+ // We don't want client side metrics to crash the client, so catch any exception when getting
+ // the OTEL instance and log the exception instead.
+ openTelemetry =
+ getOpenTelemetry(
+ settings.getProjectId(),
+ settings.getMetricsProvider(),
+ clientContext.getCredentials());
+ } catch (Throwable t) {
+ logger.log(Level.WARNING, "Failed to get OTEL, will skip exporting client side metrics", t);
+ }
+ ClientContext contextWithTracer =
+ clientContext
+ .toBuilder()
+ .setTracerFactory(createBigtableTracerFactory(settings, openTelemetry))
+ .build();
+ return new EnhancedBigtableStub(settings, contextWithTracer);
}
public static EnhancedBigtableStub createWithClientContext(
@@ -207,15 +239,33 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set
// workaround JWT audience issues
patchCredentials(builder);
+ // Fix the credentials so that they can be shared
+ Credentials credentials = null;
+ if (builder.getCredentialsProvider() != null) {
+ credentials = builder.getCredentialsProvider().getCredentials();
+ }
+ builder.setCredentialsProvider(FixedCredentialsProvider.create(credentials));
+
InstantiatingGrpcChannelProvider.Builder transportProvider =
builder.getTransportChannelProvider() instanceof InstantiatingGrpcChannelProvider
? ((InstantiatingGrpcChannelProvider) builder.getTransportChannelProvider()).toBuilder()
: null;
+ OpenTelemetry openTelemetry = null;
+ try {
+ // We don't want client side metrics to crash the client, so catch any exception when getting
+ // the OTEL instance and log the exception instead.
+ openTelemetry =
+ getOpenTelemetry(settings.getProjectId(), settings.getMetricsProvider(), credentials);
+ } catch (Throwable t) {
+ logger.log(Level.WARNING, "Failed to get OTEL, will skip exporting client side metrics", t);
+ }
ErrorCountPerConnectionMetricTracker errorCountPerConnectionMetricTracker;
- if (transportProvider != null) {
+ // Skip setting up ErrorCountPerConnectionMetricTracker if openTelemetry is null
+ if (openTelemetry != null && transportProvider != null) {
errorCountPerConnectionMetricTracker =
- new ErrorCountPerConnectionMetricTracker(createBuiltinAttributes(builder));
+ new ErrorCountPerConnectionMetricTracker(
+ openTelemetry, createBuiltinAttributes(settings));
ApiFunction<ManagedChannelBuilder, ManagedChannelBuilder> oldChannelConfigurator =
transportProvider.getChannelConfigurator();
transportProvider.setChannelConfigurator(
@@ -237,12 +287,6 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set
// Inject channel priming
if (settings.isRefreshingChannel()) {
- // Fix the credentials so that they can be shared
- Credentials credentials = null;
- if (builder.getCredentialsProvider() != null) {
- credentials = builder.getCredentialsProvider().getCredentials();
- }
- builder.setCredentialsProvider(FixedCredentialsProvider.create(credentials));
if (transportProvider != null) {
transportProvider.setChannelPrimer(
@@ -267,13 +311,19 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set
}
public static ApiTracerFactory createBigtableTracerFactory(
- EnhancedBigtableStubSettings settings) {
- return createBigtableTracerFactory(settings, Tags.getTagger(), Stats.getStatsRecorder());
+ EnhancedBigtableStubSettings settings, @Nullable OpenTelemetry openTelemetry)
+ throws IOException {
+ return createBigtableTracerFactory(
+ settings, Tags.getTagger(), Stats.getStatsRecorder(), openTelemetry);
}
@VisibleForTesting
public static ApiTracerFactory createBigtableTracerFactory(
- EnhancedBigtableStubSettings settings, Tagger tagger, StatsRecorder stats) {
+ EnhancedBigtableStubSettings settings,
+ Tagger tagger,
+ StatsRecorder stats,
+ @Nullable OpenTelemetry openTelemetry)
+ throws IOException {
String projectId = settings.getProjectId();
String instanceId = settings.getInstanceId();
String appProfileId = settings.getAppProfileId();
@@ -284,10 +334,10 @@ public static ApiTracerFactory createBigtableTracerFactory(
.put(RpcMeasureConstants.BIGTABLE_INSTANCE_ID, TagValue.create(instanceId))
.put(RpcMeasureConstants.BIGTABLE_APP_PROFILE_ID, TagValue.create(appProfileId))
.build();
- ImmutableMap<String, String> builtinAttributes = createBuiltinAttributes(settings.toBuilder());
- return new CompositeTracerFactory(
- ImmutableList.of(
+ ImmutableList.Builder<ApiTracerFactory> tracerFactories = ImmutableList.builder();
+ tracerFactories
+ .add(
// Add OpenCensus Tracing
new OpencensusTracerFactory(
ImmutableMap.<String, String>builder()
@@ -299,22 +349,52 @@ public static ApiTracerFactory createBigtableTracerFactory(
.put("gax", GaxGrpcProperties.getGaxGrpcVersion())
.put("grpc", GaxGrpcProperties.getGrpcVersion())
.put("gapic", Version.VERSION)
- .build()),
- // Add OpenCensus Metrics
- MetricsTracerFactory.create(tagger, stats, attributes),
- BuiltinMetricsTracerFactory.create(builtinAttributes),
- // Add user configured tracer
- settings.getTracerFactory()));
+ .build()))
+ // Add OpenCensus Metrics
+ .add(MetricsTracerFactory.create(tagger, stats, attributes))
+ // Add user configured tracer
+ .add(settings.getTracerFactory());
+ BuiltinMetricsTracerFactory builtinMetricsTracerFactory =
+ openTelemetry != null
+ ? BuiltinMetricsTracerFactory.create(openTelemetry, createBuiltinAttributes(settings))
+ : null;
+ if (builtinMetricsTracerFactory != null) {
+ tracerFactories.add(builtinMetricsTracerFactory);
+ }
+ return new CompositeTracerFactory(tracerFactories.build());
+ }
+
+ @Nullable
+ public static OpenTelemetry getOpenTelemetry(
+ String projectId, MetricsProvider metricsProvider, @Nullable Credentials defaultCredentials)
+ throws IOException {
+ if (metricsProvider instanceof CustomOpenTelemetryMetricsProvider) {
+ CustomOpenTelemetryMetricsProvider customMetricsProvider =
+ (CustomOpenTelemetryMetricsProvider) metricsProvider;
+ return customMetricsProvider.getOpenTelemetry();
+ } else if (metricsProvider instanceof DefaultMetricsProvider) {
+ Credentials credentials =
+ BigtableDataSettings.getMetricsCredentials() != null
+ ? BigtableDataSettings.getMetricsCredentials()
+ : defaultCredentials;
+ DefaultMetricsProvider defaultMetricsProvider = (DefaultMetricsProvider) metricsProvider;
+ return defaultMetricsProvider.getOpenTelemetry(projectId, credentials);
+ } else if (metricsProvider instanceof NoopMetricsProvider) {
+ return null;
+ }
+ throw new IOException("Invalid MetricsProvider type " + metricsProvider);
}
- private static ImmutableMap<String, String> createBuiltinAttributes(
- EnhancedBigtableStubSettings.Builder builder) {
- return ImmutableMap.<String, String>builder()
- .put("project_id", builder.getProjectId())
- .put("instance", builder.getInstanceId())
- .put("app_profile", builder.getAppProfileId())
- .put("client_name", "bigtable-java/" + Version.VERSION)
- .build();
+ private static Attributes createBuiltinAttributes(EnhancedBigtableStubSettings settings) {
+ return Attributes.of(
+ BIGTABLE_PROJECT_ID_KEY,
+ settings.getProjectId(),
+ INSTANCE_ID_KEY,
+ settings.getInstanceId(),
+ APP_PROFILE_KEY,
+ settings.getAppProfileId(),
+ CLIENT_NAME_KEY,
+ "bigtable-java/" + Version.VERSION);
}
private static void patchCredentials(EnhancedBigtableStubSettings.Builder settings)
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java
index 9a5027c740..f07a8fb7fc 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java
@@ -44,6 +44,8 @@
import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow;
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
+import com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider;
+import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider;
import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor;
import com.google.cloud.bigtable.data.v2.stub.readrows.ReadRowsBatchingDescriptor;
import com.google.common.base.MoreObjects;
@@ -229,6 +231,8 @@ public class EnhancedBigtableStubSettings extends StubSettings getJwtAudienceMapping() {
return jwtAudienceMapping;
}
+ public MetricsProvider getMetricsProvider() {
+ return metricsProvider;
+ }
+
/**
* Gets if routing cookie is enabled. If true, client will retry a request with extra metadata
* server sent back.
@@ -636,6 +645,8 @@ public static class Builder extends StubSettings.Builder jwtAudienceMapping) {
return this;
}
+ /**
+ * Sets the {@link MetricsProvider}.
+ *
+ * <p>By default, this is set to {@link
+ * com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider#INSTANCE} which will
+ * collect and export client side metrics.
+ *
+ *
+ * <p>To disable client side metrics, set it to {@link
+ * com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider#INSTANCE}.
+ *
+ *
+ * <p>To use a custom OpenTelemetry instance, refer to {@link
+ * com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider} on how to
+ * set it up.
+ */
+ public Builder setMetricsProvider(MetricsProvider metricsProvider) {
+ this.metricsProvider = Preconditions.checkNotNull(metricsProvider);
+ return this;
+ }
+
+ /** Gets the {@link MetricsProvider}. */
+ public MetricsProvider getMetricsProvider() {
+ return this.metricsProvider;
+ }
+
@InternalApi("Used for internal testing")
public Map<String, String> getJwtAudienceMapping() {
return jwtAudienceMapping;
@@ -1028,6 +1067,11 @@ public EnhancedBigtableStubSettings build() {
featureFlags.setRoutingCookie(this.getEnableRoutingCookie());
featureFlags.setRetryInfo(this.getEnableRetryInfo());
+ // client_Side_metrics_enabled feature flag is only set when a user is running with a
+ // DefaultMetricsProvider. This may cause false negatives when a user registered the
+ // metrics on their CustomOpenTelemetryMetricsProvider.
+ featureFlags.setClientSideMetricsEnabled(
+ this.getMetricsProvider() instanceof DefaultMetricsProvider);
// Serialize the web64 encode the bigtable feature flags
ByteArrayOutputStream boas = new ByteArrayOutputStream();
@@ -1080,6 +1124,7 @@ public String toString() {
generateInitialChangeStreamPartitionsSettings)
.add("readChangeStreamSettings", readChangeStreamSettings)
.add("pingAndWarmSettings", pingAndWarmSettings)
+ .add("metricsProvider", metricsProvider)
.add("parent", super.toString())
.toString();
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java
index 6208fce89e..97cc2f73ec 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java
@@ -86,7 +86,7 @@ public void call(
stopwatch.stop();
if (context.getTracer() instanceof BigtableTracer) {
((BigtableTracer) context.getTracer())
- .batchRequestThrottled(stopwatch.elapsed(TimeUnit.MILLISECONDS));
+ .batchRequestThrottled(stopwatch.elapsed(TimeUnit.NANOSECONDS));
}
RateLimitingResponseObserver innerObserver =
new RateLimitingResponseObserver(limiter, lastQpsChangeTime, responseObserver);
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java
new file mode 100644
index 0000000000..81473ae4d4
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java
@@ -0,0 +1,364 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.FIRST_RESPONSE_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME;
+
+import com.google.api.MonitoredResource;
+import com.google.api.core.ApiFuture;
+import com.google.api.core.ApiFutureCallback;
+import com.google.api.core.ApiFutures;
+import com.google.api.core.InternalApi;
+import com.google.api.gax.core.CredentialsProvider;
+import com.google.api.gax.core.FixedCredentialsProvider;
+import com.google.api.gax.core.NoCredentialsProvider;
+import com.google.auth.Credentials;
+import com.google.cloud.monitoring.v3.MetricServiceClient;
+import com.google.cloud.monitoring.v3.MetricServiceSettings;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.MoreObjects;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
+import com.google.common.util.concurrent.MoreExecutors;
+import com.google.monitoring.v3.CreateTimeSeriesRequest;
+import com.google.monitoring.v3.ProjectName;
+import com.google.monitoring.v3.TimeSeries;
+import com.google.protobuf.Empty;
+import io.opentelemetry.sdk.common.CompletableResultCode;
+import io.opentelemetry.sdk.metrics.InstrumentType;
+import io.opentelemetry.sdk.metrics.data.AggregationTemporality;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.export.MetricExporter;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import java.util.stream.Collectors;
+import javax.annotation.Nullable;
+import org.threeten.bp.Duration;
+
+/**
+ * Bigtable Cloud Monitoring OpenTelemetry Exporter.
+ *
+ * The exporter will look for all bigtable owned metrics under bigtable.googleapis.com
+ * instrumentation scope and upload them via the Google Cloud Monitoring API.
+ */
+@InternalApi
+public final class BigtableCloudMonitoringExporter implements MetricExporter {
+
+ private static final Logger logger =
+ Logger.getLogger(BigtableCloudMonitoringExporter.class.getName());
+
+ // This system property can be used to override the monitoring endpoint
+ // to a different environment. It's meant for internal testing only.
+ private static final String MONITORING_ENDPOINT =
+ MoreObjects.firstNonNull(
+ System.getProperty("bigtable.test-monitoring-endpoint"),
+ MetricServiceSettings.getDefaultEndpoint());
+
+ private static final String APPLICATION_RESOURCE_PROJECT_ID = "project_id";
+
+ private final MetricServiceClient client;
+
+ private final String bigtableProjectId;
+ private final String taskId;
+
+ // The resource the client application is running on
+ private final MonitoredResource applicationResource;
+
+ private final AtomicBoolean isShutdown = new AtomicBoolean(false);
+
+ private CompletableResultCode lastExportCode;
+
+ private static final ImmutableList BIGTABLE_TABLE_METRICS =
+ ImmutableSet.of(
+ OPERATION_LATENCIES_NAME,
+ ATTEMPT_LATENCIES_NAME,
+ SERVER_LATENCIES_NAME,
+ FIRST_RESPONSE_LATENCIES_NAME,
+ CLIENT_BLOCKING_LATENCIES_NAME,
+ APPLICATION_BLOCKING_LATENCIES_NAME,
+ RETRY_COUNT_NAME,
+ CONNECTIVITY_ERROR_COUNT_NAME)
+ .stream()
+ .map(m -> METER_NAME + m)
+ .collect(ImmutableList.toImmutableList());
+
+ private static final ImmutableList APPLICATION_METRICS =
+ ImmutableSet.of(PER_CONNECTION_ERROR_COUNT_NAME).stream()
+ .map(m -> METER_NAME + m)
+ .collect(ImmutableList.toImmutableList());
+
+ public static BigtableCloudMonitoringExporter create(
+ String projectId, @Nullable Credentials credentials) throws IOException {
+ MetricServiceSettings.Builder settingsBuilder = MetricServiceSettings.newBuilder();
+ CredentialsProvider credentialsProvider =
+ Optional.ofNullable(credentials)
+ .map(FixedCredentialsProvider::create)
+ .orElse(NoCredentialsProvider.create());
+ settingsBuilder.setCredentialsProvider(credentialsProvider);
+ settingsBuilder.setEndpoint(MONITORING_ENDPOINT);
+
+ org.threeten.bp.Duration timeout = Duration.ofMinutes(1);
+ // TODO: createServiceTimeSeries needs special handling if the request fails. Leaving
+ // it as not retried for now.
+ settingsBuilder.createServiceTimeSeriesSettings().setSimpleTimeoutNoRetries(timeout);
+
+ // Detect the resource that the client application is running on. For example,
+ // this could be a GCE instance or a GKE pod. Currently, we only support GCE instance and
+ // GKE pod. This method will return null for everything else.
+ MonitoredResource applicationResource = null;
+ try {
+ applicationResource = BigtableExporterUtils.detectResource();
+ } catch (Exception e) {
+ logger.log(
+ Level.WARNING,
+ "Failed to detect resource, will skip exporting application level metrics ",
+ e);
+ }
+
+ return new BigtableCloudMonitoringExporter(
+ projectId,
+ MetricServiceClient.create(settingsBuilder.build()),
+ applicationResource,
+ BigtableExporterUtils.getDefaultTaskValue());
+ }
+
+ @VisibleForTesting
+ BigtableCloudMonitoringExporter(
+ String projectId,
+ MetricServiceClient client,
+ @Nullable MonitoredResource applicationResource,
+ String taskId) {
+ this.client = client;
+ this.taskId = taskId;
+ this.applicationResource = applicationResource;
+ this.bigtableProjectId = projectId;
+ }
+
+ @Override
+ public CompletableResultCode export(Collection collection) {
+ if (isShutdown.get()) {
+ logger.log(Level.WARNING, "Exporter is shutting down");
+ return CompletableResultCode.ofFailure();
+ }
+
+ CompletableResultCode bigtableExportCode = exportBigtableResourceMetrics(collection);
+ CompletableResultCode applicationExportCode = exportApplicationResourceMetrics(collection);
+
+ lastExportCode =
+ CompletableResultCode.ofAll(ImmutableList.of(applicationExportCode, bigtableExportCode));
+
+ return lastExportCode;
+ }
+
+ /** Export metrics associated with a BigtableTable resource. */
+ private CompletableResultCode exportBigtableResourceMetrics(Collection collection) {
+ // Filter bigtable table metrics
+ List bigtableMetricData =
+ collection.stream()
+ .filter(md -> BIGTABLE_TABLE_METRICS.contains(md.getName()))
+ .collect(Collectors.toList());
+
+ // Skips exporting if there's none
+ if (bigtableMetricData.isEmpty()) {
+ return CompletableResultCode.ofSuccess();
+ }
+
+ // Verifies metrics project id are the same as the bigtable project id set on this client
+ if (!bigtableMetricData.stream()
+ .flatMap(metricData -> metricData.getData().getPoints().stream())
+ .allMatch(pd -> bigtableProjectId.equals(BigtableExporterUtils.getProjectId(pd)))) {
+ logger.log(Level.WARNING, "Metric data has different a projectId. Skip exporting.");
+ return CompletableResultCode.ofFailure();
+ }
+
+ List bigtableTimeSeries;
+ try {
+ bigtableTimeSeries =
+ BigtableExporterUtils.convertToBigtableTimeSeries(bigtableMetricData, taskId);
+ } catch (Throwable e) {
+ logger.log(
+ Level.WARNING,
+ "Failed to convert bigtable table metric data to cloud monitoring timeseries.",
+ e);
+ return CompletableResultCode.ofFailure();
+ }
+
+ ProjectName projectName = ProjectName.of(bigtableProjectId);
+ CreateTimeSeriesRequest bigtableRequest =
+ CreateTimeSeriesRequest.newBuilder()
+ .setName(projectName.toString())
+ .addAllTimeSeries(bigtableTimeSeries)
+ .build();
+
+ ApiFuture future =
+ this.client.createServiceTimeSeriesCallable().futureCall(bigtableRequest);
+
+ CompletableResultCode bigtableExportCode = new CompletableResultCode();
+ ApiFutures.addCallback(
+ future,
+ new ApiFutureCallback() {
+ @Override
+ public void onFailure(Throwable throwable) {
+ logger.log(
+ Level.WARNING,
+ "createServiceTimeSeries request failed for bigtable metrics. ",
+ throwable);
+ bigtableExportCode.fail();
+ }
+
+ @Override
+ public void onSuccess(Empty empty) {
+ bigtableExportCode.succeed();
+ }
+ },
+ MoreExecutors.directExecutor());
+
+ return bigtableExportCode;
+ }
+
+ /** Export metrics associated with the resource the Application is running on. */
+ private CompletableResultCode exportApplicationResourceMetrics(
+ Collection collection) {
+ if (applicationResource == null) {
+ return CompletableResultCode.ofSuccess();
+ }
+
+ // Filter application level metrics
+ List metricData =
+ collection.stream()
+ .filter(md -> APPLICATION_METRICS.contains(md.getName()))
+ .collect(Collectors.toList());
+
+ // Skip exporting if there's none
+ if (metricData.isEmpty()) {
+ return CompletableResultCode.ofSuccess();
+ }
+
+ List timeSeries;
+ try {
+ timeSeries =
+ BigtableExporterUtils.convertToApplicationResourceTimeSeries(
+ metricData, taskId, applicationResource);
+ } catch (Throwable e) {
+ logger.log(
+ Level.WARNING,
+ "Failed to convert application metric data to cloud monitoring timeseries.",
+ e);
+ return CompletableResultCode.ofFailure();
+ }
+
+ // Construct the request. The project id will be the project id of the detected monitored
+ // resource.
+ ApiFuture gceOrGkeFuture;
+ CompletableResultCode exportCode = new CompletableResultCode();
+ try {
+ ProjectName projectName =
+ ProjectName.of(applicationResource.getLabelsOrThrow(APPLICATION_RESOURCE_PROJECT_ID));
+ CreateTimeSeriesRequest request =
+ CreateTimeSeriesRequest.newBuilder()
+ .setName(projectName.toString())
+ .addAllTimeSeries(timeSeries)
+ .build();
+
+ gceOrGkeFuture = this.client.createServiceTimeSeriesCallable().futureCall(request);
+
+ ApiFutures.addCallback(
+ gceOrGkeFuture,
+ new ApiFutureCallback() {
+ @Override
+ public void onFailure(Throwable throwable) {
+ logger.log(
+ Level.WARNING,
+ "createServiceTimeSeries request failed for per connection error metrics.",
+ throwable);
+ exportCode.fail();
+ }
+
+ @Override
+ public void onSuccess(Empty empty) {
+ exportCode.succeed();
+ }
+ },
+ MoreExecutors.directExecutor());
+
+ } catch (Exception e) {
+ logger.log(
+ Level.WARNING,
+ "Failed to get projectName for application resource " + applicationResource);
+ return CompletableResultCode.ofFailure();
+ }
+
+ return exportCode;
+ }
+
+ @Override
+ public CompletableResultCode flush() {
+ if (lastExportCode != null) {
+ return lastExportCode;
+ }
+ return CompletableResultCode.ofSuccess();
+ }
+
+ @Override
+ public CompletableResultCode shutdown() {
+ if (!isShutdown.compareAndSet(false, true)) {
+ logger.log(Level.WARNING, "shutdown is called multiple times");
+ return CompletableResultCode.ofSuccess();
+ }
+ CompletableResultCode flushResult = flush();
+ CompletableResultCode shutdownResult = new CompletableResultCode();
+ flushResult.whenComplete(
+ () -> {
+ Throwable throwable = null;
+ try {
+ client.shutdown();
+ } catch (Throwable e) {
+ logger.log(Level.WARNING, "failed to shutdown the monitoring client", e);
+ throwable = e;
+ }
+ if (throwable != null) {
+ shutdownResult.fail();
+ } else {
+ shutdownResult.succeed();
+ }
+ });
+ return CompletableResultCode.ofAll(Arrays.asList(flushResult, shutdownResult));
+ }
+
+ /**
+ * For Google Cloud Monitoring always return CUMULATIVE to keep track of the cumulative value of a
+ * metric over time.
+ */
+ @Override
+ public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) {
+ return AggregationTemporality.CUMULATIVE;
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java
new file mode 100644
index 0000000000..5bf6688e17
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java
@@ -0,0 +1,367 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import static com.google.api.Distribution.BucketOptions;
+import static com.google.api.Distribution.BucketOptions.Explicit;
+import static com.google.api.MetricDescriptor.MetricKind;
+import static com.google.api.MetricDescriptor.MetricKind.CUMULATIVE;
+import static com.google.api.MetricDescriptor.MetricKind.GAUGE;
+import static com.google.api.MetricDescriptor.MetricKind.UNRECOGNIZED;
+import static com.google.api.MetricDescriptor.ValueType;
+import static com.google.api.MetricDescriptor.ValueType.DISTRIBUTION;
+import static com.google.api.MetricDescriptor.ValueType.DOUBLE;
+import static com.google.api.MetricDescriptor.ValueType.INT64;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_UID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY;
+
+import com.google.api.Distribution;
+import com.google.api.Metric;
+import com.google.api.MonitoredResource;
+import com.google.cloud.opentelemetry.detection.AttributeKeys;
+import com.google.cloud.opentelemetry.detection.DetectedPlatform;
+import com.google.cloud.opentelemetry.detection.GCPPlatformDetector;
+import com.google.common.base.MoreObjects;
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableSet;
+import com.google.monitoring.v3.Point;
+import com.google.monitoring.v3.TimeInterval;
+import com.google.monitoring.v3.TimeSeries;
+import com.google.monitoring.v3.TypedValue;
+import com.google.protobuf.util.Timestamps;
+import io.opentelemetry.api.common.AttributeKey;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.metrics.data.AggregationTemporality;
+import io.opentelemetry.sdk.metrics.data.DoublePointData;
+import io.opentelemetry.sdk.metrics.data.HistogramData;
+import io.opentelemetry.sdk.metrics.data.HistogramPointData;
+import io.opentelemetry.sdk.metrics.data.LongPointData;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.data.MetricDataType;
+import io.opentelemetry.sdk.metrics.data.PointData;
+import io.opentelemetry.sdk.metrics.data.SumData;
+import java.lang.management.ManagementFactory;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+import javax.annotation.Nullable;
+
+/** Utils to convert OpenTelemetry types to Google Cloud Monitoring types. */
+class BigtableExporterUtils {
+
+ private static final Logger logger = Logger.getLogger(BigtableExporterUtils.class.getName());
+
+ private static final String BIGTABLE_RESOURCE_TYPE = "bigtable_client_raw";
+
+ // These metric labels will be promoted to the bigtable_table monitored resource fields
+ private static final Set> BIGTABLE_PROMOTED_RESOURCE_LABELS =
+ ImmutableSet.of(
+ BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, TABLE_ID_KEY, CLUSTER_ID_KEY, ZONE_ID_KEY);
+
+ private BigtableExporterUtils() {}
+
+ /**
+ * In most cases this should look like java-${UUID}@${hostname}. The hostname will be retrieved
+ * from the jvm name and fallback to the local hostname.
+ */
+ static String getDefaultTaskValue() {
+ // Something like '<pid>@<hostname>'
+ final String jvmName = ManagementFactory.getRuntimeMXBean().getName();
+ // If jvm doesn't have the expected format, fallback to the local hostname
+ if (jvmName.indexOf('@') < 1) {
+ String hostname = "localhost";
+ try {
+ hostname = InetAddress.getLocalHost().getHostName();
+ } catch (UnknownHostException e) {
+ logger.log(Level.INFO, "Unable to get the hostname.", e);
+ }
+ // Generate a random number and use the same format "random_number@hostname".
+ return "java-" + UUID.randomUUID() + "@" + hostname;
+ }
+ return "java-" + UUID.randomUUID() + jvmName;
+ }
+
+ static String getProjectId(PointData pointData) {
+ return pointData.getAttributes().get(BIGTABLE_PROJECT_ID_KEY);
+ }
+
+ static List convertToBigtableTimeSeries(List collection, String taskId) {
+ List allTimeSeries = new ArrayList<>();
+
+ for (MetricData metricData : collection) {
+ if (!metricData.getInstrumentationScopeInfo().getName().equals(METER_NAME)) {
+ // Filter out metric data for instruments that are not part of the bigtable builtin metrics
+ continue;
+ }
+ metricData.getData().getPoints().stream()
+ .map(pointData -> convertPointToBigtableTimeSeries(metricData, pointData, taskId))
+ .forEach(allTimeSeries::add);
+ }
+
+ return allTimeSeries;
+ }
+
+ static List convertToApplicationResourceTimeSeries(
+ Collection collection, String taskId, MonitoredResource applicationResource) {
+ Preconditions.checkNotNull(
+ applicationResource,
+ "convert application metrics is called when the supported resource is not detected");
+ List allTimeSeries = new ArrayList<>();
+ for (MetricData metricData : collection) {
+ if (!metricData.getInstrumentationScopeInfo().getName().equals(METER_NAME)) {
+ // Filter out metric data for instruments that are not part of the bigtable builtin metrics
+ continue;
+ }
+ metricData.getData().getPoints().stream()
+ .map(
+ pointData ->
+ convertPointToApplicationResourceTimeSeries(
+ metricData, pointData, taskId, applicationResource))
+ .forEach(allTimeSeries::add);
+ }
+ return allTimeSeries;
+ }
+
+ @Nullable
+ static MonitoredResource detectResource() {
+ GCPPlatformDetector detector = GCPPlatformDetector.DEFAULT_INSTANCE;
+ DetectedPlatform detectedPlatform = detector.detectPlatform();
+ MonitoredResource monitoredResource = null;
+ try {
+ switch (detectedPlatform.getSupportedPlatform()) {
+ case GOOGLE_COMPUTE_ENGINE:
+ monitoredResource =
+ createGceMonitoredResource(
+ detectedPlatform.getProjectId(), detectedPlatform.getAttributes());
+ break;
+ case GOOGLE_KUBERNETES_ENGINE:
+ monitoredResource =
+ createGkeMonitoredResource(
+ detectedPlatform.getProjectId(), detectedPlatform.getAttributes());
+ break;
+ }
+ } catch (IllegalStateException e) {
+ logger.log(
+ Level.WARNING,
+ "Failed to create monitored resource for " + detectedPlatform.getSupportedPlatform(),
+ e);
+ }
+ return monitoredResource;
+ }
+
+ private static MonitoredResource createGceMonitoredResource(
+ String projectId, Map attributes) {
+ return MonitoredResource.newBuilder()
+ .setType("gce_instance")
+ .putLabels("project_id", projectId)
+ .putLabels("instance_id", getAttribute(attributes, AttributeKeys.GCE_INSTANCE_ID))
+ .putLabels("zone", getAttribute(attributes, AttributeKeys.GCE_AVAILABILITY_ZONE))
+ .build();
+ }
+
+ private static MonitoredResource createGkeMonitoredResource(
+ String projectId, Map attributes) {
+ return MonitoredResource.newBuilder()
+ .setType("k8s_container")
+ .putLabels("project_id", projectId)
+ .putLabels("location", getAttribute(attributes, AttributeKeys.GKE_CLUSTER_LOCATION))
+ .putLabels("cluster_name", getAttribute(attributes, AttributeKeys.GKE_CLUSTER_NAME))
+ .putLabels("namespace_name", MoreObjects.firstNonNull(System.getenv("NAMESPACE"), ""))
+ .putLabels("pod_name", MoreObjects.firstNonNull(System.getenv("HOSTNAME"), ""))
+ .putLabels("container_name", MoreObjects.firstNonNull(System.getenv("CONTAINER_NAME"), ""))
+ .build();
+ }
+
+ private static String getAttribute(Map attributes, String key) {
+ String value = attributes.get(key);
+ if (value == null) {
+ throw new IllegalStateException(
+ "Required attribute " + key + " does not exist in the attributes map " + attributes);
+ }
+ return value;
+ }
+
+ private static TimeSeries convertPointToBigtableTimeSeries(
+ MetricData metricData, PointData pointData, String taskId) {
+ TimeSeries.Builder builder =
+ TimeSeries.newBuilder()
+ .setMetricKind(convertMetricKind(metricData))
+ .setValueType(convertValueType(metricData.getType()));
+ Metric.Builder metricBuilder = Metric.newBuilder().setType(metricData.getName());
+
+ Attributes attributes = pointData.getAttributes();
+ MonitoredResource.Builder monitoredResourceBuilder =
+ MonitoredResource.newBuilder().setType(BIGTABLE_RESOURCE_TYPE);
+
+ for (AttributeKey> key : attributes.asMap().keySet()) {
+ if (BIGTABLE_PROMOTED_RESOURCE_LABELS.contains(key)) {
+ monitoredResourceBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key)));
+ } else {
+ metricBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key)));
+ }
+ }
+
+ builder.setResource(monitoredResourceBuilder.build());
+
+ metricBuilder.putLabels(CLIENT_UID_KEY.getKey(), taskId);
+ builder.setMetric(metricBuilder.build());
+
+ TimeInterval timeInterval =
+ TimeInterval.newBuilder()
+ .setStartTime(Timestamps.fromNanos(pointData.getStartEpochNanos()))
+ .setEndTime(Timestamps.fromNanos(pointData.getEpochNanos()))
+ .build();
+
+ builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval));
+
+ return builder.build();
+ }
+
+ private static TimeSeries convertPointToApplicationResourceTimeSeries(
+ MetricData metricData,
+ PointData pointData,
+ String taskId,
+ MonitoredResource applicationResource) {
+ TimeSeries.Builder builder =
+ TimeSeries.newBuilder()
+ .setMetricKind(convertMetricKind(metricData))
+ .setValueType(convertValueType(metricData.getType()))
+ .setResource(applicationResource);
+
+ Metric.Builder metricBuilder = Metric.newBuilder().setType(metricData.getName());
+
+ Attributes attributes = pointData.getAttributes();
+ for (AttributeKey> key : attributes.asMap().keySet()) {
+ metricBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key)));
+ }
+
+ metricBuilder.putLabels(CLIENT_UID_KEY.getKey(), taskId);
+ builder.setMetric(metricBuilder.build());
+
+ TimeInterval timeInterval =
+ TimeInterval.newBuilder()
+ .setStartTime(Timestamps.fromNanos(pointData.getStartEpochNanos()))
+ .setEndTime(Timestamps.fromNanos(pointData.getEpochNanos()))
+ .build();
+
+ builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval));
+ return builder.build();
+ }
+
+ private static MetricKind convertMetricKind(MetricData metricData) {
+ switch (metricData.getType()) {
+ case HISTOGRAM:
+ case EXPONENTIAL_HISTOGRAM:
+ return convertHistogramType(metricData.getHistogramData());
+ case LONG_GAUGE:
+ case DOUBLE_GAUGE:
+ return GAUGE;
+ case LONG_SUM:
+ return convertSumDataType(metricData.getLongSumData());
+ case DOUBLE_SUM:
+ return convertSumDataType(metricData.getDoubleSumData());
+ default:
+ return UNRECOGNIZED;
+ }
+ }
+
+ private static MetricKind convertHistogramType(HistogramData histogramData) {
+ if (histogramData.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) {
+ return CUMULATIVE;
+ }
+ return UNRECOGNIZED;
+ }
+
+ private static MetricKind convertSumDataType(SumData> sum) {
+ if (!sum.isMonotonic()) {
+ return GAUGE;
+ }
+ if (sum.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) {
+ return CUMULATIVE;
+ }
+ return UNRECOGNIZED;
+ }
+
+ private static ValueType convertValueType(MetricDataType metricDataType) {
+ switch (metricDataType) {
+ case LONG_GAUGE:
+ case LONG_SUM:
+ return INT64;
+ case DOUBLE_GAUGE:
+ case DOUBLE_SUM:
+ return DOUBLE;
+ case HISTOGRAM:
+ case EXPONENTIAL_HISTOGRAM:
+ return DISTRIBUTION;
+ default:
+ return ValueType.UNRECOGNIZED;
+ }
+ }
+
+ private static Point createPoint(
+ MetricDataType type, PointData pointData, TimeInterval timeInterval) {
+ Point.Builder builder = Point.newBuilder().setInterval(timeInterval);
+ switch (type) {
+ case HISTOGRAM:
+ case EXPONENTIAL_HISTOGRAM:
+ return builder
+ .setValue(
+ TypedValue.newBuilder()
+ .setDistributionValue(convertHistogramData((HistogramPointData) pointData))
+ .build())
+ .build();
+ case DOUBLE_GAUGE:
+ case DOUBLE_SUM:
+ return builder
+ .setValue(
+ TypedValue.newBuilder()
+ .setDoubleValue(((DoublePointData) pointData).getValue())
+ .build())
+ .build();
+ case LONG_GAUGE:
+ case LONG_SUM:
+ return builder
+ .setValue(TypedValue.newBuilder().setInt64Value(((LongPointData) pointData).getValue()))
+ .build();
+ default:
+ logger.log(Level.WARNING, "unsupported metric type");
+ return builder.build();
+ }
+ }
+
+ private static Distribution convertHistogramData(HistogramPointData pointData) {
+ return Distribution.newBuilder()
+ .setCount(pointData.getCount())
+ .setMean(pointData.getCount() == 0L ? 0.0D : pointData.getSum() / pointData.getCount())
+ .setBucketOptions(
+ BucketOptions.newBuilder()
+ .setExplicitBuckets(Explicit.newBuilder().addAllBounds(pointData.getBoundaries())))
+ .addAllBucketCounts(pointData.getCounts())
+ .build();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java
index 1cda49934c..3b2242385a 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java
@@ -42,7 +42,7 @@ public void streamCreated(Attributes transportAttrs, Metadata headers) {
@Override
public void outboundMessageSent(int seqNo, long optionalWireSize, long optionalUncompressedSize) {
- tracer.grpcChannelQueuedLatencies(stopwatch.elapsed(TimeUnit.MILLISECONDS));
+ tracer.grpcChannelQueuedLatencies(stopwatch.elapsed(TimeUnit.NANOSECONDS));
}
static class Factory extends ClientStreamTracer.Factory {
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java
new file mode 100644
index 0000000000..d85300828b
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java
@@ -0,0 +1,220 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.api.core.InternalApi;
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.ImmutableSet;
+import io.opentelemetry.api.common.AttributeKey;
+import io.opentelemetry.sdk.metrics.Aggregation;
+import io.opentelemetry.sdk.metrics.InstrumentSelector;
+import io.opentelemetry.sdk.metrics.InstrumentType;
+import io.opentelemetry.sdk.metrics.View;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/** Defining Bigtable built-in metrics scope, attributes, metric names and views. */
+@InternalApi
+public class BuiltinMetricsConstants {
+
+ // Metric attribute keys for monitored resource
+ public static final AttributeKey<String> BIGTABLE_PROJECT_ID_KEY =
+ AttributeKey.stringKey("project_id");
+ public static final AttributeKey<String> INSTANCE_ID_KEY = AttributeKey.stringKey("instance");
+ public static final AttributeKey<String> TABLE_ID_KEY = AttributeKey.stringKey("table");
+ public static final AttributeKey<String> CLUSTER_ID_KEY = AttributeKey.stringKey("cluster");
+ public static final AttributeKey<String> ZONE_ID_KEY = AttributeKey.stringKey("zone");
+
+ // Metric attribute keys for labels
+ // We need to access APP_PROFILE_KEY in EnhancedBigtableStubSettings and STREAMING_KEY in
+ // IT tests, so they're public.
+ public static final AttributeKey<String> APP_PROFILE_KEY = AttributeKey.stringKey("app_profile");
+ public static final AttributeKey<Boolean> STREAMING_KEY = AttributeKey.booleanKey("streaming");
+ public static final AttributeKey<String> CLIENT_NAME_KEY = AttributeKey.stringKey("client_name");
+ static final AttributeKey<String> METHOD_KEY = AttributeKey.stringKey("method");
+ static final AttributeKey<String> STATUS_KEY = AttributeKey.stringKey("status");
+ static final AttributeKey<String> CLIENT_UID_KEY = AttributeKey.stringKey("client_uid");
+
+ // Metric names
+ public static final String OPERATION_LATENCIES_NAME = "operation_latencies";
+ public static final String ATTEMPT_LATENCIES_NAME = "attempt_latencies";
+ static final String RETRY_COUNT_NAME = "retry_count";
+ static final String CONNECTIVITY_ERROR_COUNT_NAME = "connectivity_error_count";
+ static final String SERVER_LATENCIES_NAME = "server_latencies";
+ static final String FIRST_RESPONSE_LATENCIES_NAME = "first_response_latencies";
+ static final String APPLICATION_BLOCKING_LATENCIES_NAME = "application_latencies";
+ static final String CLIENT_BLOCKING_LATENCIES_NAME = "throttling_latencies";
+ static final String PER_CONNECTION_ERROR_COUNT_NAME = "per_connection_error_count";
+
+ // Buckets under 100,000 are identical to buckets for server side metrics handler_latencies.
+ // Extending client side bucket to up to 3,200,000.
+ private static final Aggregation AGGREGATION_WITH_MILLIS_HISTOGRAM =
+ Aggregation.explicitBucketHistogram(
+ ImmutableList.of(
+ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0, 30.0, 40.0,
+ 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, 250.0, 300.0, 400.0, 500.0, 650.0,
+ 800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, 50000.0, 100000.0, 200000.0,
+ 400000.0, 800000.0, 1600000.0, 3200000.0)); // max is 53.3 minutes
+
+ private static final Aggregation AGGREGATION_PER_CONNECTION_ERROR_COUNT_HISTOGRAM =
+ Aggregation.explicitBucketHistogram(
+ ImmutableList.of(
+ 1.0,
+ 2.0,
+ 4.0,
+ 8.0,
+ 16.0,
+ 32.0,
+ 64.0,
+ 125.0,
+ 250.0,
+ 500.0,
+ 1_000.0,
+ 2_000.0,
+ 4_000.0,
+ 8_000.0,
+ 16_000.0,
+ 32_000.0,
+ 64_000.0,
+ 128_000.0,
+ 250_000.0,
+ 500_000.0,
+ 1_000_000.0));
+
+ public static final String METER_NAME = "bigtable.googleapis.com/internal/client/";
+
+ static final Set<AttributeKey> COMMON_ATTRIBUTES =
+ ImmutableSet.of(
+ BIGTABLE_PROJECT_ID_KEY,
+ INSTANCE_ID_KEY,
+ TABLE_ID_KEY,
+ APP_PROFILE_KEY,
+ CLUSTER_ID_KEY,
+ ZONE_ID_KEY,
+ METHOD_KEY,
+ CLIENT_NAME_KEY);
+
+ static void defineView(
+ ImmutableMap.Builder<InstrumentSelector, View> viewMap,
+ String id,
+ Aggregation aggregation,
+ InstrumentType type,
+ String unit,
+ Set<AttributeKey> attributes) {
+ InstrumentSelector selector =
+ InstrumentSelector.builder()
+ .setName(id)
+ .setMeterName(METER_NAME)
+ .setType(type)
+ .setUnit(unit)
+ .build();
+ Set<String> attributesFilter =
+ ImmutableSet.<String>builder()
+ .addAll(
+ COMMON_ATTRIBUTES.stream().map(AttributeKey::getKey).collect(Collectors.toSet()))
+ .addAll(attributes.stream().map(AttributeKey::getKey).collect(Collectors.toSet()))
+ .build();
+ View view =
+ View.builder()
+ .setName(METER_NAME + id)
+ .setAggregation(aggregation)
+ .setAttributeFilter(attributesFilter)
+ .build();
+
+ viewMap.put(selector, view);
+ }
+
+ public static Map<InstrumentSelector, View> getAllViews() {
+ ImmutableMap.Builder<InstrumentSelector, View> views = ImmutableMap.builder();
+
+ defineView(
+ views,
+ OPERATION_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.<AttributeKey>builder()
+ .addAll(COMMON_ATTRIBUTES)
+ .add(STREAMING_KEY, STATUS_KEY)
+ .build());
+ defineView(
+ views,
+ ATTEMPT_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.<AttributeKey>builder()
+ .addAll(COMMON_ATTRIBUTES)
+ .add(STREAMING_KEY, STATUS_KEY)
+ .build());
+ defineView(
+ views,
+ SERVER_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.<AttributeKey>builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build());
+ defineView(
+ views,
+ FIRST_RESPONSE_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.<AttributeKey>builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build());
+ defineView(
+ views,
+ APPLICATION_BLOCKING_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.<AttributeKey>builder().addAll(COMMON_ATTRIBUTES).build());
+ defineView(
+ views,
+ CLIENT_BLOCKING_LATENCIES_NAME,
+ AGGREGATION_WITH_MILLIS_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "ms",
+ ImmutableSet.<AttributeKey>builder().addAll(COMMON_ATTRIBUTES).build());
+ defineView(
+ views,
+ RETRY_COUNT_NAME,
+ Aggregation.sum(),
+ InstrumentType.COUNTER,
+ "1",
+ ImmutableSet.<AttributeKey>builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build());
+ defineView(
+ views,
+ CONNECTIVITY_ERROR_COUNT_NAME,
+ Aggregation.sum(),
+ InstrumentType.COUNTER,
+ "1",
+ ImmutableSet.<AttributeKey>builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build());
+
+ defineView(
+ views,
+ PER_CONNECTION_ERROR_COUNT_NAME,
+ AGGREGATION_PER_CONNECTION_ERROR_COUNT_HISTOGRAM,
+ InstrumentType.HISTOGRAM,
+ "1",
+ ImmutableSet.<AttributeKey>builder()
+ .add(BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, APP_PROFILE_KEY, CLIENT_NAME_KEY)
+ .build());
+
+ return views.build();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java
index 2d8262a93e..abd214d760 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java
@@ -16,13 +16,22 @@
package com.google.cloud.bigtable.data.v2.stub.metrics;
import static com.google.api.gax.tracing.ApiTracerFactory.OperationType;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METHOD_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STATUS_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STREAMING_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY;
import com.google.api.gax.retrying.ServerStreamingAttemptException;
import com.google.api.gax.tracing.SpanName;
-import com.google.cloud.bigtable.stats.StatsRecorderWrapper;
-import com.google.common.annotations.VisibleForTesting;
+import com.google.cloud.bigtable.Version;
import com.google.common.base.Stopwatch;
import com.google.common.math.IntMath;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.metrics.DoubleHistogram;
+import io.opentelemetry.api.metrics.LongCounter;
import java.util.concurrent.CancellationException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
@@ -37,8 +46,7 @@
*/
class BuiltinMetricsTracer extends BigtableTracer {
- private final StatsRecorderWrapper recorder;
-
+ private static final String NAME = "java-bigtable/" + Version.VERSION;
private final OperationType operationType;
private final SpanName spanName;
@@ -64,21 +72,56 @@ class BuiltinMetricsTracer extends BigtableTracer {
private boolean flowControlIsDisabled = false;
- private AtomicInteger requestLeft = new AtomicInteger(0);
+ private final AtomicInteger requestLeft = new AtomicInteger(0);
// Monitored resource labels
private String tableId = "unspecified";
private String zone = "global";
private String cluster = "unspecified";
- private AtomicLong totalClientBlockingTime = new AtomicLong(0);
+ private final AtomicLong totalClientBlockingTime = new AtomicLong(0);
+
+ private final Attributes baseAttributes;
+
+ private Long serverLatencies = null;
+
+ // OpenCensus (and server) histogram buckets use [start, end), however OpenTelemetry uses (start,
+ // end]. To work around this, we measure all the latencies in nanoseconds and convert them
+ // to milliseconds and use DoubleHistogram. This should minimize the chance of a data
+ // point falling on a bucket boundary, which causes off-by-one errors.
+ private final DoubleHistogram operationLatenciesHistogram;
+ private final DoubleHistogram attemptLatenciesHistogram;
+ private final DoubleHistogram serverLatenciesHistogram;
+ private final DoubleHistogram firstResponseLatenciesHistogram;
+ private final DoubleHistogram clientBlockingLatenciesHistogram;
+ private final DoubleHistogram applicationBlockingLatenciesHistogram;
+ private final LongCounter connectivityErrorCounter;
+ private final LongCounter retryCounter;
- @VisibleForTesting
BuiltinMetricsTracer(
- OperationType operationType, SpanName spanName, StatsRecorderWrapper recorder) {
+ OperationType operationType,
+ SpanName spanName,
+ Attributes attributes,
+ DoubleHistogram operationLatenciesHistogram,
+ DoubleHistogram attemptLatenciesHistogram,
+ DoubleHistogram serverLatenciesHistogram,
+ DoubleHistogram firstResponseLatenciesHistogram,
+ DoubleHistogram clientBlockingLatenciesHistogram,
+ DoubleHistogram applicationBlockingLatenciesHistogram,
+ LongCounter connectivityErrorCounter,
+ LongCounter retryCounter) {
this.operationType = operationType;
this.spanName = spanName;
- this.recorder = recorder;
+ this.baseAttributes = attributes;
+
+ this.operationLatenciesHistogram = operationLatenciesHistogram;
+ this.attemptLatenciesHistogram = attemptLatenciesHistogram;
+ this.serverLatenciesHistogram = serverLatenciesHistogram;
+ this.firstResponseLatenciesHistogram = firstResponseLatenciesHistogram;
+ this.clientBlockingLatenciesHistogram = clientBlockingLatenciesHistogram;
+ this.applicationBlockingLatenciesHistogram = applicationBlockingLatenciesHistogram;
+ this.connectivityErrorCounter = connectivityErrorCounter;
+ this.retryCounter = retryCounter;
}
@Override
@@ -203,13 +246,8 @@ public int getAttempt() {
@Override
public void recordGfeMetadata(@Nullable Long latency, @Nullable Throwable throwable) {
- // Record the metrics and put in the map after the attempt is done, so we can have cluster and
- // zone information
if (latency != null) {
- recorder.putGfeLatencies(latency);
- recorder.putGfeMissingHeaders(0);
- } else {
- recorder.putGfeMissingHeaders(1);
+ serverLatencies = latency;
}
}
@@ -220,13 +258,13 @@ public void setLocations(String zone, String cluster) {
}
@Override
- public void batchRequestThrottled(long throttledTimeMs) {
- totalClientBlockingTime.addAndGet(throttledTimeMs);
+ public void batchRequestThrottled(long throttledTimeNanos) {
+ totalClientBlockingTime.addAndGet(Duration.ofNanos(throttledTimeNanos).toMillis());
}
@Override
- public void grpcChannelQueuedLatencies(long queuedTimeMs) {
- totalClientBlockingTime.addAndGet(queuedTimeMs);
+ public void grpcChannelQueuedLatencies(long queuedTimeNanos) {
+ totalClientBlockingTime.addAndGet(queuedTimeNanos);
}
@Override
@@ -239,26 +277,43 @@ private void recordOperationCompletion(@Nullable Throwable status) {
return;
}
operationTimer.stop();
- long operationLatency = operationTimer.elapsed(TimeUnit.MILLISECONDS);
+
+ boolean isStreaming = operationType == OperationType.ServerStreaming;
+ String statusStr = Util.extractStatus(status);
+
+ // Publish metric data with all the attributes. The attributes get filtered in
+ // BuiltinMetricsConstants when we construct the views.
+ Attributes attributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, tableId)
+ .put(CLUSTER_ID_KEY, cluster)
+ .put(ZONE_ID_KEY, zone)
+ .put(METHOD_KEY, spanName.toString())
+ .put(CLIENT_NAME_KEY, NAME)
+ .put(STREAMING_KEY, isStreaming)
+ .put(STATUS_KEY, statusStr)
+ .build();
+
long operationLatencyNano = operationTimer.elapsed(TimeUnit.NANOSECONDS);
// Only record when retry count is greater than 0 so the retry
// graph will be less confusing
if (attemptCount > 1) {
- recorder.putRetryCount(attemptCount - 1);
+ retryCounter.add(attemptCount - 1, attributes);
}
+ operationLatenciesHistogram.record(convertToMs(operationLatencyNano), attributes);
+
// serverLatencyTimer should already be stopped in recordAttemptCompletion
- recorder.putOperationLatencies(operationLatency);
- recorder.putApplicationLatencies(
- Duration.ofNanos(operationLatencyNano - totalServerLatencyNano.get()).toMillis());
+ long applicationLatencyNano = operationLatencyNano - totalServerLatencyNano.get();
+ applicationBlockingLatenciesHistogram.record(convertToMs(applicationLatencyNano), attributes);
if (operationType == OperationType.ServerStreaming
&& spanName.getMethodName().equals("ReadRows")) {
- recorder.putFirstResponseLatencies(firstResponsePerOpTimer.elapsed(TimeUnit.MILLISECONDS));
+ firstResponseLatenciesHistogram.record(
+ convertToMs(firstResponsePerOpTimer.elapsed(TimeUnit.NANOSECONDS)), attributes);
}
-
- recorder.recordOperation(Util.extractStatus(status), tableId, zone, cluster);
}
private void recordAttemptCompletion(@Nullable Throwable status) {
@@ -273,8 +328,7 @@ private void recordAttemptCompletion(@Nullable Throwable status) {
}
}
- // Make sure to reset the blocking time after recording it for the next attempt
- recorder.putClientBlockingLatencies(totalClientBlockingTime.getAndSet(0));
+ boolean isStreaming = operationType == OperationType.ServerStreaming;
// Patch the status until it's fixed in gax. When an attempt failed,
// it'll throw a ServerStreamingAttemptException. Unwrap the exception
@@ -283,7 +337,35 @@ private void recordAttemptCompletion(@Nullable Throwable status) {
status = status.getCause();
}
- recorder.putAttemptLatencies(attemptTimer.elapsed(TimeUnit.MILLISECONDS));
- recorder.recordAttempt(Util.extractStatus(status), tableId, zone, cluster);
+ String statusStr = Util.extractStatus(status);
+
+ Attributes attributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, tableId)
+ .put(CLUSTER_ID_KEY, cluster)
+ .put(ZONE_ID_KEY, zone)
+ .put(METHOD_KEY, spanName.toString())
+ .put(CLIENT_NAME_KEY, NAME)
+ .put(STREAMING_KEY, isStreaming)
+ .put(STATUS_KEY, statusStr)
+ .build();
+
+ clientBlockingLatenciesHistogram.record(convertToMs(totalClientBlockingTime.get()), attributes);
+
+ attemptLatenciesHistogram.record(
+ convertToMs(attemptTimer.elapsed(TimeUnit.NANOSECONDS)), attributes);
+
+ if (serverLatencies != null) {
+ serverLatenciesHistogram.record(serverLatencies, attributes);
+ connectivityErrorCounter.add(0, attributes);
+ } else {
+ connectivityErrorCounter.add(1, attributes);
+ }
+ }
+
+ private static double convertToMs(long nanoSeconds) {
+ double toMs = 1e-6;
+ return nanoSeconds * toMs;
}
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java
index 794997071d..f0ac656978 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java
@@ -15,29 +15,112 @@
*/
package com.google.cloud.bigtable.data.v2.stub.metrics;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.FIRST_RESPONSE_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME;
+
import com.google.api.core.InternalApi;
import com.google.api.gax.tracing.ApiTracer;
import com.google.api.gax.tracing.ApiTracerFactory;
import com.google.api.gax.tracing.BaseApiTracerFactory;
import com.google.api.gax.tracing.SpanName;
-import com.google.cloud.bigtable.stats.StatsWrapper;
-import com.google.common.collect.ImmutableMap;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.metrics.DoubleHistogram;
+import io.opentelemetry.api.metrics.LongCounter;
+import io.opentelemetry.api.metrics.Meter;
+import java.io.IOException;
/**
- * {@link ApiTracerFactory} that will generate OpenCensus metrics by using the {@link ApiTracer}
+ * {@link ApiTracerFactory} that will generate OpenTelemetry metrics by using the {@link ApiTracer}
* api.
*/
@InternalApi("For internal use only")
public class BuiltinMetricsTracerFactory extends BaseApiTracerFactory {
- private final ImmutableMap<String, String> statsAttributes;
+ private final Attributes attributes;
+
+ private static final String MILLISECOND = "ms";
+ private static final String COUNT = "1";
- public static BuiltinMetricsTracerFactory create(ImmutableMap<String, String> statsAttributes) {
- return new BuiltinMetricsTracerFactory(statsAttributes);
+ private final DoubleHistogram operationLatenciesHistogram;
+ private final DoubleHistogram attemptLatenciesHistogram;
+ private final DoubleHistogram serverLatenciesHistogram;
+ private final DoubleHistogram firstResponseLatenciesHistogram;
+ private final DoubleHistogram clientBlockingLatenciesHistogram;
+ private final DoubleHistogram applicationBlockingLatenciesHistogram;
+ private final LongCounter connectivityErrorCounter;
+ private final LongCounter retryCounter;
+
+ public static BuiltinMetricsTracerFactory create(
+ OpenTelemetry openTelemetry, Attributes attributes) throws IOException {
+ return new BuiltinMetricsTracerFactory(openTelemetry, attributes);
}
- private BuiltinMetricsTracerFactory(ImmutableMap<String, String> statsAttributes) {
- this.statsAttributes = statsAttributes;
+ BuiltinMetricsTracerFactory(OpenTelemetry openTelemetry, Attributes attributes) {
+ this.attributes = attributes;
+ Meter meter = openTelemetry.getMeter(METER_NAME);
+
+ operationLatenciesHistogram =
+ meter
+ .histogramBuilder(OPERATION_LATENCIES_NAME)
+ .setDescription(
+ "Total time until final operation success or failure, including retries and backoff.")
+ .setUnit(MILLISECOND)
+ .build();
+ attemptLatenciesHistogram =
+ meter
+ .histogramBuilder(ATTEMPT_LATENCIES_NAME)
+ .setDescription("Client observed latency per RPC attempt.")
+ .setUnit(MILLISECOND)
+ .build();
+ serverLatenciesHistogram =
+ meter
+ .histogramBuilder(SERVER_LATENCIES_NAME)
+ .setDescription(
+ "The latency measured from the moment that the RPC entered the Google data center until the RPC was completed.")
+ .setUnit(MILLISECOND)
+ .build();
+ firstResponseLatenciesHistogram =
+ meter
+ .histogramBuilder(FIRST_RESPONSE_LATENCIES_NAME)
+ .setDescription(
+ "Latency from operation start until the response headers were received. The publishing of the measurement will be delayed until the attempt response has been received.")
+ .setUnit(MILLISECOND)
+ .build();
+ clientBlockingLatenciesHistogram =
+ meter
+ .histogramBuilder(CLIENT_BLOCKING_LATENCIES_NAME)
+ .setDescription(
+ "The artificial latency introduced by the client to limit the number of outstanding requests. The publishing of the measurement will be delayed until the attempt trailers have been received.")
+ .setUnit(MILLISECOND)
+ .build();
+ applicationBlockingLatenciesHistogram =
+ meter
+ .histogramBuilder(APPLICATION_BLOCKING_LATENCIES_NAME)
+ .setDescription(
+ "The latency of the client application consuming available response data.")
+ .setUnit(MILLISECOND)
+ .build();
+ connectivityErrorCounter =
+ meter
+ .counterBuilder(CONNECTIVITY_ERROR_COUNT_NAME)
+ .setDescription(
+ "Number of requests that failed to reach the Google datacenter. (Requests without google response headers")
+ .setUnit(COUNT)
+ .build();
+ retryCounter =
+ meter
+ .counterBuilder(RETRY_COUNT_NAME)
+ .setDescription("The number of additional RPCs sent after the initial attempt.")
+ .setUnit(COUNT)
+ .build();
}
@Override
@@ -45,6 +128,14 @@ public ApiTracer newTracer(ApiTracer parent, SpanName spanName, OperationType op
return new BuiltinMetricsTracer(
operationType,
spanName,
- StatsWrapper.createRecorder(operationType, spanName, statsAttributes));
+ attributes,
+ operationLatenciesHistogram,
+ attemptLatenciesHistogram,
+ serverLatenciesHistogram,
+ firstResponseLatenciesHistogram,
+ clientBlockingLatenciesHistogram,
+ applicationBlockingLatenciesHistogram,
+ connectivityErrorCounter,
+ retryCounter);
}
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java
new file mode 100644
index 0000000000..445160a146
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.auth.Credentials;
+import com.google.auth.oauth2.GoogleCredentials;
+import io.opentelemetry.sdk.metrics.InstrumentSelector;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.View;
+import io.opentelemetry.sdk.metrics.export.MetricExporter;
+import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader;
+import java.io.IOException;
+import java.util.Map;
+import javax.annotation.Nullable;
+
+/**
+ * A util class to register built-in metrics on a custom OpenTelemetry instance. This is for
+ * advanced usage, and is only necessary when wanting to write built-in metrics to cloud monitoring
+ * and custom sinks. Please refer to {@link CustomOpenTelemetryMetricsProvider} for example usage.
+ */
+public class BuiltinMetricsView {
+
+ private BuiltinMetricsView() {}
+
+ /**
+ * Register built-in metrics on the {@link SdkMeterProviderBuilder} with application default
+ * credentials.
+ */
+ public static void registerBuiltinMetrics(String projectId, SdkMeterProviderBuilder builder)
+ throws IOException {
+ BuiltinMetricsView.registerBuiltinMetrics(
+ projectId, GoogleCredentials.getApplicationDefault(), builder);
+ }
+
+ /** Register built-in metrics on the {@link SdkMeterProviderBuilder} with credentials. */
+ public static void registerBuiltinMetrics(
+ String projectId, @Nullable Credentials credentials, SdkMeterProviderBuilder builder)
+ throws IOException {
+ MetricExporter metricExporter = BigtableCloudMonitoringExporter.create(projectId, credentials);
+ for (Map.Entry<InstrumentSelector, View> entry :
+ BuiltinMetricsConstants.getAllViews().entrySet()) {
+ builder.registerView(entry.getKey(), entry.getValue());
+ }
+ builder.registerMetricReader(PeriodicMetricReader.create(metricExporter));
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java
new file mode 100644
index 0000000000..8c1c5c1c90
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java
@@ -0,0 +1,70 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.common.base.MoreObjects;
+import io.opentelemetry.api.OpenTelemetry;
+
+/**
+ * Set a custom OpenTelemetry instance.
+ *
+ * <p>To register client side metrics on the custom OpenTelemetry:
+ *
+ * <pre>
+ * {@code
+ * SdkMeterProviderBuilder sdkMeterProvider = SdkMeterProvider.builder();
+ *
+ * // register Builtin metrics on your meter provider with default credentials
+ * BuiltinMetricsView.registerBuiltinMetrics("project-id", sdkMeterProvider);
+ *
+ * // register other metrics reader and views
+ * sdkMeterProvider.registerMetricReader(..);
+ * sdkMeterProvider.registerView(..);
+ *
+ * // create the OTEL instance
+ * OpenTelemetry openTelemetry = OpenTelemetrySdk
+ * .builder()
+ * .setMeterProvider(sdkMeterProvider.build())
+ * .build();
+ *
+ * // Override MetricsProvider in BigtableDataSettings
+ * BigtableDataSettings settings = BigtableDataSettings.newBuilder()
+ * .setProjectId("my-project")
+ * .setInstanceId("my-instance-id")
+ * .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry)
+ * .build();
+ * }
+ */
+public final class CustomOpenTelemetryMetricsProvider implements MetricsProvider {
+
+ private final OpenTelemetry otel;
+
+ public static CustomOpenTelemetryMetricsProvider create(OpenTelemetry otel) {
+ return new CustomOpenTelemetryMetricsProvider(otel);
+ }
+
+ private CustomOpenTelemetryMetricsProvider(OpenTelemetry otel) {
+ this.otel = otel;
+ }
+
+ public OpenTelemetry getOpenTelemetry() {
+ return otel;
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).add("openTelemetry", otel).toString();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java
new file mode 100644
index 0000000000..b8aad8c931
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.api.core.InternalApi;
+import com.google.auth.Credentials;
+import com.google.common.base.MoreObjects;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import java.io.IOException;
+import javax.annotation.Nullable;
+
+/**
+ * Set {@link
+ * com.google.cloud.bigtable.data.v2.BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)},
+ * to {@link DefaultMetricsProvider#INSTANCE} to enable collecting and exporting client side metrics
+ * https://cloud.google.com/bigtable/docs/client-side-metrics. This is the default setting in {@link
+ * com.google.cloud.bigtable.data.v2.BigtableDataSettings}.
+ */
+public final class DefaultMetricsProvider implements MetricsProvider {
+
+ public static DefaultMetricsProvider INSTANCE = new DefaultMetricsProvider();
+
+ private OpenTelemetry openTelemetry;
+ private String projectId;
+
+ private DefaultMetricsProvider() {}
+
+ @InternalApi
+ public OpenTelemetry getOpenTelemetry(String projectId, @Nullable Credentials credentials)
+ throws IOException {
+ this.projectId = projectId;
+ if (openTelemetry == null) {
+ SdkMeterProviderBuilder meterProvider = SdkMeterProvider.builder();
+ BuiltinMetricsView.registerBuiltinMetrics(projectId, credentials, meterProvider);
+ openTelemetry = OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+ }
+ return openTelemetry;
+ }
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this)
+ .add("projectId", projectId)
+ .add("openTelemetry", openTelemetry)
+ .toString();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java
index cab3b0bbd0..a891df9509 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java
@@ -15,12 +15,15 @@
*/
package com.google.cloud.bigtable.data.v2.stub.metrics;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME;
+
import com.google.api.core.InternalApi;
-import com.google.cloud.bigtable.stats.StatsRecorderWrapperForConnection;
-import com.google.cloud.bigtable.stats.StatsWrapper;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableMap;
import io.grpc.ClientInterceptor;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.metrics.LongHistogram;
+import io.opentelemetry.api.metrics.Meter;
import java.util.Collections;
import java.util.Set;
import java.util.WeakHashMap;
@@ -30,24 +33,30 @@
/* Background task that goes through all connections and updates the errors_per_connection metric. */
@InternalApi("For internal use only")
public class ErrorCountPerConnectionMetricTracker implements Runnable {
+
private static final Integer PER_CONNECTION_ERROR_COUNT_PERIOD_SECONDS = 60;
+
+ private final LongHistogram perConnectionErrorCountHistogram;
+ private final Attributes attributes;
+
private final Set connectionErrorCountInterceptors;
private final Object interceptorsLock = new Object();
- // This is not final so that it can be updated and mocked during testing.
- private StatsRecorderWrapperForConnection statsRecorderWrapperForConnection;
- @VisibleForTesting
- void setStatsRecorderWrapperForConnection(
- StatsRecorderWrapperForConnection statsRecorderWrapperForConnection) {
- this.statsRecorderWrapperForConnection = statsRecorderWrapperForConnection;
- }
-
- public ErrorCountPerConnectionMetricTracker(ImmutableMap builtinAttributes) {
+ public ErrorCountPerConnectionMetricTracker(OpenTelemetry openTelemetry, Attributes attributes) {
connectionErrorCountInterceptors =
Collections.synchronizedSet(Collections.newSetFromMap(new WeakHashMap<>()));
- this.statsRecorderWrapperForConnection =
- StatsWrapper.createRecorderForConnection(builtinAttributes);
+ Meter meter = openTelemetry.getMeter(METER_NAME);
+
+ perConnectionErrorCountHistogram =
+ meter
+ .histogramBuilder(PER_CONNECTION_ERROR_COUNT_NAME)
+ .ofLongs()
+ .setDescription("Distribution of counts of channels per 'error count per minute'.")
+ .setUnit("1")
+ .build();
+
+ this.attributes = attributes;
}
public void startConnectionErrorCountTracker(ScheduledExecutorService scheduler) {
@@ -75,7 +84,7 @@ public void run() {
if (errors > 0 || successes > 0) {
// TODO: add a metric to also keep track of the number of successful requests per each
// connection.
- statsRecorderWrapperForConnection.putAndRecordPerConnectionErrorCount(errors);
+ perConnectionErrorCountHistogram.record(errors, attributes);
}
}
}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java
new file mode 100644
index 0000000000..251bb41619
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.api.core.InternalExtensionOnly;
+
+/**
+ * Provide client side metrics https://cloud.google.com/bigtable/docs/client-side-metrics
+ * implementations.
+ */
+@InternalExtensionOnly
+public interface MetricsProvider {}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java
new file mode 100644
index 0000000000..9a00ddb135
--- /dev/null
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java
@@ -0,0 +1,36 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import com.google.common.base.MoreObjects;
+
+/**
+ * Set {@link
+ * com.google.cloud.bigtable.data.v2.BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)}
+ * to {@link #INSTANCE} to disable collecting and exporting client side metrics
+ * https://cloud.google.com/bigtable/docs/client-side-metrics.
+ */
+public final class NoopMetricsProvider implements MetricsProvider {
+
+ public static NoopMetricsProvider INSTANCE = new NoopMetricsProvider();
+
+ private NoopMetricsProvider() {}
+
+ @Override
+ public String toString() {
+ return MoreObjects.toStringHelper(this).toString();
+ }
+}
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java
index b7140f0156..ce73d75dc1 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java
@@ -21,6 +21,7 @@
import com.google.api.gax.rpc.ApiCallContext;
import com.google.api.gax.rpc.UnaryCallable;
import com.google.api.gax.tracing.ApiTracer;
+import org.threeten.bp.Duration;
/**
* This callable will extract total throttled time from {@link ApiCallContext} and add it to {@link
@@ -42,7 +43,8 @@ public ApiFuture futureCall(RequestT request, ApiCallContext context)
// this should always be true
if (tracer instanceof BigtableTracer) {
((BigtableTracer) tracer)
- .batchRequestThrottled(context.getOption(Batcher.THROTTLED_TIME_KEY));
+ .batchRequestThrottled(
+ Duration.ofMillis(context.getOption(Batcher.THROTTLED_TIME_KEY)).toNanos());
}
}
return innerCallable.futureCall(request, context);
diff --git a/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json b/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json
index 931ac73adc..95c5bab9e5 100644
--- a/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json
+++ b/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json
@@ -395,6 +395,33 @@
"allDeclaredClasses": true,
"allPublicClasses": true
},
+ {
+ "name": "com.google.bigtable.admin.v2.AppProfile$DataBoostIsolationReadOnly",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.admin.v2.AppProfile$DataBoostIsolationReadOnly$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.admin.v2.AppProfile$DataBoostIsolationReadOnly$ComputeBillingOwner",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
{
"name": "com.google.bigtable.admin.v2.AppProfile$MultiClusterRoutingUseAny",
"queryAllDeclaredConstructors": true,
@@ -1052,6 +1079,24 @@
"allDeclaredClasses": true,
"allPublicClasses": true
},
+ {
+ "name": "com.google.bigtable.admin.v2.DataBoostReadLocalWrites",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.admin.v2.DataBoostReadLocalWrites$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
{
"name": "com.google.bigtable.admin.v2.DeleteAppProfileRequest",
"queryAllDeclaredConstructors": true,
@@ -2033,6 +2078,24 @@
"allDeclaredClasses": true,
"allPublicClasses": true
},
+ {
+ "name": "com.google.bigtable.admin.v2.StandardReadRemoteWrites",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
+ {
+ "name": "com.google.bigtable.admin.v2.StandardReadRemoteWrites$Builder",
+ "queryAllDeclaredConstructors": true,
+ "queryAllPublicConstructors": true,
+ "queryAllDeclaredMethods": true,
+ "allPublicMethods": true,
+ "allDeclaredClasses": true,
+ "allPublicClasses": true
+ },
{
"name": "com.google.bigtable.admin.v2.StorageType",
"queryAllDeclaredConstructors": true,
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java
index a35112b380..fea66e82bf 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java
@@ -36,6 +36,7 @@
import com.google.bigtable.v2.ReadRowsResponse;
import com.google.cloud.bigtable.data.v2.internal.NameUtil;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
+import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider;
import com.google.common.base.Preconditions;
import com.google.common.io.BaseEncoding;
import io.grpc.Attributes;
@@ -169,10 +170,13 @@ public void tearDown() {
@Test
public void testNewClientsShareTransportChannel() throws Exception {
-
// Create 3 lightweight clients
-
- try (BigtableDataClientFactory factory = BigtableDataClientFactory.create(defaultSettings);
+ try (BigtableDataClientFactory factory =
+ BigtableDataClientFactory.create(
+ defaultSettings
+ .toBuilder()
+ .setMetricsProvider(NoopMetricsProvider.INSTANCE)
+ .build());
BigtableDataClient ignored1 = factory.createForInstance("project1", "instance1");
BigtableDataClient ignored2 = factory.createForInstance("project2", "instance2");
BigtableDataClient ignored3 = factory.createForInstance("project3", "instance3")) {
@@ -316,7 +320,7 @@ public void testFeatureFlags() throws Exception {
@Test
public void testBulkMutationFlowControllerConfigured() throws Exception {
BigtableDataSettings settings =
- BigtableDataSettings.newBuilder()
+ BigtableDataSettings.newBuilderForEmulator(server.getPort())
.setProjectId("my-project")
.setInstanceId("my-instance")
.setCredentialsProvider(credentialsProvider)
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java
index 4e75fb8631..56181a20ab 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java
@@ -15,34 +15,64 @@
*/
package com.google.cloud.bigtable.data.v2.it;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getAggregatedValue;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getMetricData;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getStartTimeSeconds;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.verifyAttributes;
+import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.Truth.assertWithMessage;
import static com.google.common.truth.TruthJUnit.assume;
import com.google.api.client.util.Lists;
+import com.google.cloud.bigtable.admin.v2.BigtableInstanceAdminClient;
import com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient;
+import com.google.cloud.bigtable.admin.v2.models.AppProfile;
+import com.google.cloud.bigtable.admin.v2.models.CreateAppProfileRequest;
import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest;
import com.google.cloud.bigtable.admin.v2.models.Table;
+import com.google.cloud.bigtable.data.v2.BigtableDataClient;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.models.Query;
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView;
+import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv;
import com.google.cloud.bigtable.test_helpers.env.PrefixGenerator;
import com.google.cloud.bigtable.test_helpers.env.TestEnvRule;
import com.google.cloud.monitoring.v3.MetricServiceClient;
import com.google.common.base.Stopwatch;
+import com.google.common.collect.BoundType;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Range;
import com.google.monitoring.v3.ListTimeSeriesRequest;
import com.google.monitoring.v3.ListTimeSeriesResponse;
+import com.google.monitoring.v3.Point;
import com.google.monitoring.v3.ProjectName;
import com.google.monitoring.v3.TimeInterval;
+import com.google.monitoring.v3.TimeSeries;
+import com.google.protobuf.Timestamp;
import com.google.protobuf.util.Timestamps;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.api.common.AttributesBuilder;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
+import java.util.stream.Collectors;
+import org.junit.After;
+import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
@@ -50,6 +80,7 @@
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
import org.threeten.bp.Duration;
+import org.threeten.bp.Instant;
@RunWith(JUnit4.class)
public class BuiltinMetricsIT {
@@ -58,71 +89,131 @@ public class BuiltinMetricsIT {
private static final Logger logger = Logger.getLogger(BuiltinMetricsIT.class.getName());
@Rule public Timeout globalTimeout = Timeout.seconds(900);
- private static Table table;
- private static BigtableTableAdminClient tableAdminClient;
- private static MetricServiceClient metricClient;
+
+ private Table tableCustomOtel;
+ private Table tableDefault;
+ private BigtableDataClient clientCustomOtel;
+ private BigtableDataClient clientDefault;
+ private BigtableTableAdminClient tableAdminClient;
+ private BigtableInstanceAdminClient instanceAdminClient;
+ private MetricServiceClient metricClient;
+
+ private InMemoryMetricReader metricReader;
+ private String appProfileCustomOtel;
+ private String appProfileDefault;
public static String[] VIEWS = {
"operation_latencies",
"attempt_latencies",
"connectivity_error_count",
- "application_blocking_latencies"
+ "application_blocking_latencies",
};
- @BeforeClass
- public static void setUpClass() throws IOException {
+ @Before
+ public void setup() throws IOException {
+ // This test tests 2 things. End-to-end test using the default OTEL instance created by the
+ // client, and also end-to-end test using a custom OTEL instance set by the customer. In
+ // both tests, a BigtableCloudMonitoringExporter is created to export data to Cloud Monitoring.
assume()
.withMessage("Builtin metrics integration test is not supported by emulator")
.that(testEnvRule.env())
.isNotInstanceOf(EmulatorEnv.class);
- // Enable built in metrics
- BigtableDataSettings.enableBuiltinMetrics();
-
// Create a cloud monitoring client
metricClient = MetricServiceClient.create();
tableAdminClient = testEnvRule.env().getTableAdminClient();
+ instanceAdminClient = testEnvRule.env().getInstanceAdminClient();
+ appProfileCustomOtel = PrefixGenerator.newPrefix("test1");
+ appProfileDefault = PrefixGenerator.newPrefix("test2");
+ instanceAdminClient.createAppProfile(
+ CreateAppProfileRequest.of(testEnvRule.env().getInstanceId(), appProfileCustomOtel)
+ .setRoutingPolicy(
+ AppProfile.SingleClusterRoutingPolicy.of(testEnvRule.env().getPrimaryClusterId()))
+ .setIsolationPolicy(AppProfile.StandardIsolationPolicy.of(AppProfile.Priority.LOW)));
+ instanceAdminClient.createAppProfile(
+ CreateAppProfileRequest.of(testEnvRule.env().getInstanceId(), appProfileDefault)
+ .setRoutingPolicy(
+ AppProfile.SingleClusterRoutingPolicy.of(testEnvRule.env().getPrimaryClusterId()))
+ .setIsolationPolicy(AppProfile.StandardIsolationPolicy.of(AppProfile.Priority.LOW)));
+
+ // When using the custom OTEL instance, we can also register an InMemoryMetricReader on the
+ // SdkMeterProvider to verify the data exported on Cloud Monitoring with the in memory metric
+ // data collected in InMemoryMetricReader.
+ metricReader = InMemoryMetricReader.create();
+
+ SdkMeterProviderBuilder meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(metricReader);
+ BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider);
+ OpenTelemetry openTelemetry =
+ OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+
+ BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder();
+
+ clientCustomOtel =
+ BigtableDataClient.create(
+ settings
+ .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry))
+ .setAppProfileId(appProfileCustomOtel)
+ .build());
+ clientDefault = BigtableDataClient.create(settings.setAppProfileId(appProfileDefault).build());
}
- @AfterClass
- public static void tearDown() {
+ @After
+ public void tearDown() {
if (metricClient != null) {
metricClient.close();
}
- if (table != null) {
- tableAdminClient.deleteTable(table.getId());
+ if (tableCustomOtel != null) {
+ tableAdminClient.deleteTable(tableCustomOtel.getId());
+ }
+ if (tableDefault != null) {
+ tableAdminClient.deleteTable(tableDefault.getId());
+ }
+ if (instanceAdminClient != null) {
+ instanceAdminClient.deleteAppProfile(
+ testEnvRule.env().getInstanceId(), appProfileCustomOtel, true);
+ instanceAdminClient.deleteAppProfile(
+ testEnvRule.env().getInstanceId(), appProfileDefault, true);
+ }
+ if (clientCustomOtel != null) {
+ clientCustomOtel.close();
+ }
+ if (clientDefault != null) {
+ clientDefault.close();
}
}
@Test
- public void testBuiltinMetrics() throws Exception {
- logger.info("Started testing builtin metrics");
- table =
+ public void testBuiltinMetricsWithDefaultOTEL() throws Exception {
+ logger.info("Started testing builtin metrics with default OTEL");
+ tableDefault =
tableAdminClient.createTable(
- CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test"))
+ CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test1"))
.addFamily("cf"));
- logger.info("Create table: " + table.getId());
- // Send a MutateRow and ReadRows request
- testEnvRule
- .env()
- .getDataClient()
- .mutateRow(RowMutation.create(table.getId(), "a-new-key").setCell("cf", "q", "abc"));
+ logger.info("Create default table: " + tableDefault.getId());
+
+ Instant start = Instant.now().minus(Duration.ofSeconds(10));
+
+ // Send a MutateRow and ReadRows request and measure the latencies for these requests.
+ clientDefault.mutateRow(
+ RowMutation.create(tableDefault.getId(), "a-new-key").setCell("cf", "q", "abc"));
ArrayList rows =
- Lists.newArrayList(
- testEnvRule.env().getDataClient().readRows(Query.create(table.getId()).limit(10)));
+ Lists.newArrayList(clientDefault.readRows(Query.create(tableDefault.getId()).limit(10)));
- Stopwatch stopwatch = Stopwatch.createStarted();
+ // This stopwatch is used to limit fetching of metric data in verifyMetrics
+ Stopwatch metricsPollingStopwatch = Stopwatch.createStarted();
ProjectName name = ProjectName.of(testEnvRule.env().getProjectId());
- // Restrict time to last 10 minutes and 5 minutes after the request
- long startMillis = System.currentTimeMillis() - Duration.ofMinutes(10).toMillis();
- long endMillis = startMillis + Duration.ofMinutes(15).toMillis();
+ // Interval is set in the monarch request when querying metric timestamps.
+ // Restrict it to before we send the request and 3 minutes after we send the request. If
+ // it turns out to be still flaky we can increase the filter range.
+ Instant end = Instant.now().plus(Duration.ofMinutes(3));
TimeInterval interval =
TimeInterval.newBuilder()
- .setStartTime(Timestamps.fromMillis(startMillis))
- .setEndTime(Timestamps.fromMillis(endMillis))
+ .setStartTime(Timestamps.fromMillis(start.toEpochMilli()))
+ .setEndTime(Timestamps.fromMillis(end.toEpochMilli()))
.build();
for (String view : VIEWS) {
@@ -132,42 +223,123 @@ public void testBuiltinMetrics() throws Exception {
String.format(
"metric.type=\"bigtable.googleapis.com/client/%s\" "
+ "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.MutateRow\""
- + " AND resource.labels.table=\"%s\"",
- view, testEnvRule.env().getInstanceId(), table.getId());
+ + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"",
+ view, testEnvRule.env().getInstanceId(), tableDefault.getId(), appProfileDefault);
ListTimeSeriesRequest.Builder requestBuilder =
ListTimeSeriesRequest.newBuilder()
.setName(name.toString())
.setFilter(metricFilter)
.setInterval(interval)
.setView(ListTimeSeriesRequest.TimeSeriesView.FULL);
-
- verifyMetricsArePublished(requestBuilder.build(), stopwatch, view);
+ verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view);
// Verify that metrics are published for ReadRows request
metricFilter =
String.format(
"metric.type=\"bigtable.googleapis.com/client/%s\" "
+ "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.ReadRows\""
- + " AND resource.labels.table=\"%s\"",
- view, testEnvRule.env().getInstanceId(), table.getId());
+ + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"",
+ view, testEnvRule.env().getInstanceId(), tableDefault.getId(), appProfileDefault);
+ requestBuilder.setFilter(metricFilter);
+
+ verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view);
+ }
+ }
+
+ @Test
+ public void testBuiltinMetricsWithCustomOTEL() throws Exception {
+ logger.info("Started testing builtin metrics with custom OTEL");
+ tableCustomOtel =
+ tableAdminClient.createTable(
+ CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test2"))
+ .addFamily("cf"));
+ logger.info("Create custom table: " + tableCustomOtel.getId());
+
+ Instant start = Instant.now().minus(Duration.ofSeconds(10));
+ // Send a MutateRow and ReadRows request and measure the latencies for these requests.
+ clientCustomOtel.mutateRow(
+ RowMutation.create(tableCustomOtel.getId(), "a-new-key").setCell("cf", "q", "abc"));
+ ArrayList rows =
+ Lists.newArrayList(
+ clientCustomOtel.readRows(Query.create(tableCustomOtel.getId()).limit(10)));
+
+ // This stopwatch is used to limit fetching of metric data in verifyMetrics
+ Stopwatch metricsPollingStopwatch = Stopwatch.createStarted();
+
+ ProjectName name = ProjectName.of(testEnvRule.env().getProjectId());
+
+ Collection fromMetricReader = metricReader.collectAllMetrics();
+
+ // Interval is set in the monarch request when querying metric timestamps.
+ // Restrict it to before we send the request and 3 minutes after we send the request. If
+ // it turns out to be still flaky we can increase the filter range.
+ Instant end = start.plus(Duration.ofMinutes(3));
+ TimeInterval interval =
+ TimeInterval.newBuilder()
+ .setStartTime(Timestamps.fromMillis(start.toEpochMilli()))
+ .setEndTime(Timestamps.fromMillis(end.toEpochMilli()))
+ .build();
+
+ for (String view : VIEWS) {
+ String otelMetricName = view;
+ if (view.equals("application_blocking_latencies")) {
+ otelMetricName = "application_latencies";
+ }
+ MetricData dataFromReader = getMetricData(fromMetricReader, otelMetricName);
+
+ // Filter on instance and method name
+ // Verify that metrics are correct for MutateRows request
+ String metricFilter =
+ String.format(
+ "metric.type=\"bigtable.googleapis.com/client/%s\" "
+ + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.MutateRow\""
+ + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"",
+ view,
+ testEnvRule.env().getInstanceId(),
+ tableCustomOtel.getId(),
+ appProfileCustomOtel);
+ ListTimeSeriesRequest.Builder requestBuilder =
+ ListTimeSeriesRequest.newBuilder()
+ .setName(name.toString())
+ .setFilter(metricFilter)
+ .setInterval(interval)
+ .setView(ListTimeSeriesRequest.TimeSeriesView.FULL);
+
+ ListTimeSeriesResponse response =
+ verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view);
+ verifyMetricsWithMetricsReader(response, dataFromReader);
+
+ // Verify that metrics are correct for ReadRows request
+ metricFilter =
+ String.format(
+ "metric.type=\"bigtable.googleapis.com/client/%s\" "
+ + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.ReadRows\""
+ + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"",
+ view,
+ testEnvRule.env().getInstanceId(),
+ tableCustomOtel.getId(),
+ appProfileCustomOtel);
requestBuilder.setFilter(metricFilter);
- verifyMetricsArePublished(requestBuilder.build(), stopwatch, view);
+ response = verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view);
+ verifyMetricsWithMetricsReader(response, dataFromReader);
}
}
- private void verifyMetricsArePublished(
- ListTimeSeriesRequest request, Stopwatch stopwatch, String view) throws Exception {
+ private ListTimeSeriesResponse verifyMetricsArePublished(
+ ListTimeSeriesRequest request, Stopwatch metricsPollingStopwatch, String view)
+ throws Exception {
ListTimeSeriesResponse response = metricClient.listTimeSeriesCallable().call(request);
- logger.log(
- Level.INFO,
- "Checking for view "
- + view
- + ", has timeseries="
- + response.getTimeSeriesCount()
- + " stopwatch elapsed "
- + stopwatch.elapsed(TimeUnit.MINUTES));
- while (response.getTimeSeriesCount() == 0 && stopwatch.elapsed(TimeUnit.MINUTES) < 10) {
+ while (response.getTimeSeriesCount() == 0
+ && metricsPollingStopwatch.elapsed(TimeUnit.MINUTES) < 10) {
+ logger.log(
+ Level.INFO,
+ "Checking for view "
+ + view
+ + ", has timeseries="
+ + response.getTimeSeriesCount()
+ + " stopwatch elapsed "
+ + metricsPollingStopwatch.elapsed(TimeUnit.MINUTES));
// Call listTimeSeries every minute
Thread.sleep(Duration.ofMinutes(1).toMillis());
response = metricClient.listTimeSeriesCallable().call(request);
@@ -176,5 +348,64 @@ private void verifyMetricsArePublished(
assertWithMessage("View " + view + " didn't return any data.")
.that(response.getTimeSeriesCount())
.isGreaterThan(0);
+
+ return response;
+ }
+
+ private void verifyMetricsWithMetricsReader(
+ ListTimeSeriesResponse response, MetricData dataFromReader) {
+ for (TimeSeries ts : response.getTimeSeriesList()) {
+ Map attributesMap =
+ ImmutableMap.builder()
+ .putAll(ts.getResource().getLabelsMap())
+ .putAll(ts.getMetric().getLabelsMap())
+ .build();
+ AttributesBuilder attributesBuilder = Attributes.builder();
+ String streamingKey = BuiltinMetricsConstants.STREAMING_KEY.getKey();
+ attributesMap.forEach(
+ (k, v) -> {
+ if (!k.equals(streamingKey)) {
+ attributesBuilder.put(k, v);
+ }
+ });
+ if (attributesMap.containsKey(streamingKey)) {
+ attributesBuilder.put(streamingKey, Boolean.parseBoolean(attributesMap.get(streamingKey)));
+ }
+ Attributes attributes = attributesBuilder.build();
+ verifyAttributes(dataFromReader, attributes);
+ long expectedValue = getAggregatedValue(dataFromReader, attributes);
+ Timestamp startTime = getStartTimeSeconds(dataFromReader, attributes);
+ assertThat(startTime.getSeconds()).isGreaterThan(0);
+ List point =
+ ts.getPointsList().stream()
+ .filter(
+ p ->
+ Timestamps.compare(p.getInterval().getStartTime(), startTime) >= 0
+ && Timestamps.compare(
+ p.getInterval().getStartTime(),
+ Timestamps.add(
+ startTime,
+ com.google.protobuf.Duration.newBuilder()
+ .setSeconds(60)
+ .build()))
+ < 0)
+ .collect(Collectors.toList());
+ if (point.size() > 0) {
+ long actualValue = (long) point.get(0).getValue().getDistributionValue().getMean();
+ assertWithMessage(
+ "actual value does not match expected value, actual value "
+ + actualValue
+ + " expected value "
+ + expectedValue
+ + " actual start time "
+ + point.get(0).getInterval().getStartTime()
+ + " expected start time "
+ + startTime)
+ .that(actualValue)
+ .isIn(
+ Range.range(
+ expectedValue - 1, BoundType.CLOSED, expectedValue + 1, BoundType.CLOSED));
+ }
+ }
}
}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java
new file mode 100644
index 0000000000..56f6bfa476
--- /dev/null
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.it;
+
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
+import com.google.common.truth.Correspondence;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.data.PointData;
+
+public class MetricsITUtils {
+
+ static final Correspondence METRIC_DATA_NAME_CONTAINS =
+ Correspondence.from((md, s) -> md.getName().contains(s), "contains name");
+
+ static final Correspondence POINT_DATA_CLUSTER_ID_CONTAINS =
+ Correspondence.from(
+ (pd, s) -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY).contains(s),
+ "contains attributes");
+
+ static final Correspondence POINT_DATA_ZONE_ID_CONTAINS =
+ Correspondence.from(
+ (pd, s) -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY).contains(s),
+ "contains attributes");
+}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/SampleRowsIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/SampleRowsIT.java
index 81fd553c8e..5e5567e3b1 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/SampleRowsIT.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/SampleRowsIT.java
@@ -15,21 +15,24 @@
*/
package com.google.cloud.bigtable.data.v2.it;
-import static com.google.cloud.bigtable.misc_utilities.AuthorizedViewTestHelper.AUTHORIZED_VIEW_COLUMN_QUALIFIER;
-import static com.google.cloud.bigtable.misc_utilities.AuthorizedViewTestHelper.AUTHORIZED_VIEW_ROW_PREFIX;
-import static com.google.cloud.bigtable.misc_utilities.AuthorizedViewTestHelper.createTestAuthorizedView;
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.TruthJUnit.assume;
import com.google.api.core.ApiFuture;
import com.google.api.core.ApiFutures;
import com.google.cloud.bigtable.admin.v2.models.AuthorizedView;
+import com.google.cloud.bigtable.admin.v2.models.CreateAuthorizedViewRequest;
+import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest;
+import com.google.cloud.bigtable.admin.v2.models.SubsetView;
import com.google.cloud.bigtable.data.v2.BigtableDataClient;
+import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId;
import com.google.cloud.bigtable.data.v2.models.KeyOffset;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv;
import com.google.cloud.bigtable.test_helpers.env.TestEnvRule;
import com.google.common.collect.Lists;
+import com.google.protobuf.ByteString;
+import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
@@ -75,42 +78,50 @@ public void testOnAuthorizedView()
.withMessage("AuthorizedView is not supported on Emulator")
.that(testEnvRule.env())
.isNotInstanceOf(EmulatorEnv.class);
-
- AuthorizedView testAuthorizedView = createTestAuthorizedView(testEnvRule);
+ AuthorizedView testAuthorizedView = createPreSplitTableAndAuthorizedView();
BigtableDataClient client = testEnvRule.env().getDataClient();
- String rowPrefix = AUTHORIZED_VIEW_ROW_PREFIX + UUID.randomUUID();
- String rowPrefixOutsideAuthorizedView = UUID.randomUUID() + "-outside-authorized-view";
- // Create some data so that sample row keys has something to show
- List> futures = Lists.newArrayList();
- for (int i = 0; i < 10; i++) {
- ApiFuture future =
- client.mutateRowAsync(
- RowMutation.create(testEnvRule.env().getTableId(), rowPrefix + "-" + i)
- .setCell(
- testEnvRule.env().getFamilyId(), AUTHORIZED_VIEW_COLUMN_QUALIFIER, "value"));
- futures.add(future);
- ApiFuture futureOutsideAuthorizedView =
- client.mutateRowAsync(
- RowMutation.create(
- testEnvRule.env().getTableId(), rowPrefixOutsideAuthorizedView + "-" + i)
- .setCell(
- testEnvRule.env().getFamilyId(), AUTHORIZED_VIEW_COLUMN_QUALIFIER, "value"));
- futures.add(futureOutsideAuthorizedView);
+ ApiFuture> future =
+ client.sampleRowKeysAsync(
+ AuthorizedViewId.of(testAuthorizedView.getTableId(), testAuthorizedView.getId()));
+
+ List results = future.get(1, TimeUnit.MINUTES);
+
+ List resultKeys = new ArrayList<>();
+ for (KeyOffset keyOffset : results) {
+ resultKeys.add(keyOffset.getKey());
}
- ApiFutures.allAsList(futures).get(1, TimeUnit.MINUTES);
- ApiFuture> future = client.sampleRowKeysAsync(testEnvRule.env().getTableId());
+ assertThat(resultKeys)
+ .containsExactly(
+ ByteString.copyFromUtf8("food"),
+ ByteString.copyFromUtf8("fool"),
+ ByteString.copyFromUtf8("fop"));
- List results = future.get(1, TimeUnit.MINUTES);
+ testEnvRule
+ .env()
+ .getTableAdminClient()
+ .deleteAuthorizedView(testAuthorizedView.getTableId(), testAuthorizedView.getId());
+ }
- assertThat(results).isNotEmpty();
- assertThat(results.get(results.size() - 1).getOffsetBytes()).isGreaterThan(0L);
+ private static AuthorizedView createPreSplitTableAndAuthorizedView() {
+ String tableId = UUID.randomUUID().toString();
+ String authorizedViewId = UUID.randomUUID().toString();
testEnvRule
.env()
.getTableAdminClient()
- .deleteAuthorizedView(testEnvRule.env().getTableId(), testAuthorizedView.getId());
+ .createTable(
+ CreateTableRequest.of(tableId)
+ .addSplit(ByteString.copyFromUtf8("apple"))
+ .addSplit(ByteString.copyFromUtf8("food"))
+ .addSplit(ByteString.copyFromUtf8("fool"))
+ .addSplit(ByteString.copyFromUtf8("good")));
+ CreateAuthorizedViewRequest request =
+ CreateAuthorizedViewRequest.of(tableId, authorizedViewId)
+ .setAuthorizedViewType(SubsetView.create().addRowPrefix("foo"))
+ .setDeletionProtection(false);
+ return testEnvRule.env().getTableAdminClient().createAuthorizedView(request);
}
}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java
index b0e12d5ade..84ab24f1c8 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java
@@ -15,37 +15,76 @@
*/
package com.google.cloud.bigtable.data.v2.it;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.METRIC_DATA_NAME_CONTAINS;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_CLUSTER_ID_CONTAINS;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_ZONE_ID_CONTAINS;
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.TruthJUnit.assume;
import com.google.api.core.ApiFuture;
import com.google.api.gax.rpc.NotFoundException;
import com.google.cloud.bigtable.admin.v2.models.Cluster;
+import com.google.cloud.bigtable.data.v2.BigtableDataClient;
+import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.models.Query;
import com.google.cloud.bigtable.data.v2.models.Row;
-import com.google.cloud.bigtable.stats.BuiltinViews;
-import com.google.cloud.bigtable.stats.StatsWrapper;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView;
+import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv;
import com.google.cloud.bigtable.test_helpers.env.TestEnvRule;
import com.google.common.collect.Lists;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.data.PointData;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
+import java.io.IOException;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
-import org.junit.BeforeClass;
+import java.util.stream.Collectors;
+import org.junit.After;
+import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
public class StreamingMetricsMetadataIT {
@ClassRule public static TestEnvRule testEnvRule = new TestEnvRule();
- @BeforeClass
- public static void setUpClass() {
+ private BigtableDataClient client;
+ private InMemoryMetricReader metricReader;
+
+ @Before
+ public void setup() throws IOException {
assume()
.withMessage("StreamingMetricsMetadataIT is not supported on Emulator")
.that(testEnvRule.env())
.isNotInstanceOf(EmulatorEnv.class);
- BuiltinViews.registerBigtableBuiltinViews();
+
+ BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder();
+
+ metricReader = InMemoryMetricReader.create();
+
+ SdkMeterProviderBuilder meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(metricReader);
+ BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider);
+ OpenTelemetry openTelemetry =
+ OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+
+ settings.setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry));
+ client = BigtableDataClient.create(settings.build());
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ if (client != null) {
+ client.close();
+ }
}
@Test
@@ -54,7 +93,7 @@ public void testSuccess() throws Exception {
String uniqueKey = prefix + "-read";
Query query = Query.create(testEnvRule.env().getTableId()).rowKey(uniqueKey);
- ArrayList rows = Lists.newArrayList(testEnvRule.env().getDataClient().readRows(query));
+ ArrayList rows = Lists.newArrayList(client.readRows(query));
ApiFuture> clustersFuture =
testEnvRule
@@ -64,27 +103,73 @@ public void testSuccess() throws Exception {
List clusters = clustersFuture.get(1, TimeUnit.MINUTES);
- // give opencensus some time to populate view data
- Thread.sleep(100);
+ Collection allMetricData = metricReader.collectAllMetrics();
+ List metrics =
+ metricReader.collectAllMetrics().stream()
+ .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME))
+ .collect(Collectors.toList());
+
+ assertThat(allMetricData)
+ .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
+ .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME);
+ assertThat(metrics).hasSize(1);
- List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings();
- assertThat(tagValueStrings).contains(clusters.get(0).getZone());
- assertThat(tagValueStrings).contains(clusters.get(0).getId());
+ MetricData metricData = metrics.get(0);
+ List pointData = new ArrayList<>(metricData.getData().getPoints());
+ List clusterAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
+ .collect(Collectors.toList());
+ List zoneAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
+ .collect(Collectors.toList());
+
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS)
+ .contains(clusters.get(0).getId());
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS)
+ .contains(clusters.get(0).getZone());
+ assertThat(clusterAttributes).contains(clusters.get(0).getId());
+ assertThat(zoneAttributes).contains(clusters.get(0).getZone());
}
@Test
- public void testFailure() throws InterruptedException {
+ public void testFailure() {
Query query = Query.create("non-exist-table");
try {
- Lists.newArrayList(testEnvRule.env().getDataClient().readRows(query));
+ Lists.newArrayList(client.readRows(query));
} catch (NotFoundException e) {
}
- // give opencensus some time to populate view data
- Thread.sleep(100);
+ Collection allMetricData = metricReader.collectAllMetrics();
+ List metrics =
+ metricReader.collectAllMetrics().stream()
+ .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME))
+ .collect(Collectors.toList());
+
+ assertThat(allMetricData)
+ .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
+ .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME);
+ assertThat(metrics).hasSize(1);
+
+ MetricData metricData = metrics.get(0);
+ List pointData = new ArrayList<>(metricData.getData().getPoints());
+ List clusterAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
+ .collect(Collectors.toList());
+ List zoneAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
+ .collect(Collectors.toList());
- List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings();
- assertThat(tagValueStrings).contains("unspecified");
- assertThat(tagValueStrings).contains("global");
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS)
+ .contains("unspecified");
+ assertThat(pointData).comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS).contains("global");
+ assertThat(clusterAttributes).contains("unspecified");
+ assertThat(zoneAttributes).contains("global");
}
}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java
index aa2a4317fc..ad5f71db8f 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java
@@ -15,35 +15,76 @@
*/
package com.google.cloud.bigtable.data.v2.it;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.METRIC_DATA_NAME_CONTAINS;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_CLUSTER_ID_CONTAINS;
+import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_ZONE_ID_CONTAINS;
import static com.google.common.truth.Truth.assertThat;
import static com.google.common.truth.TruthJUnit.assume;
import com.google.api.core.ApiFuture;
import com.google.api.gax.rpc.NotFoundException;
import com.google.cloud.bigtable.admin.v2.models.Cluster;
+import com.google.cloud.bigtable.data.v2.BigtableDataClient;
+import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
-import com.google.cloud.bigtable.stats.BuiltinViews;
-import com.google.cloud.bigtable.stats.StatsWrapper;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
+import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView;
+import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv;
import com.google.cloud.bigtable.test_helpers.env.TestEnvRule;
+import io.opentelemetry.api.OpenTelemetry;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.data.PointData;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
-import org.junit.BeforeClass;
+import java.util.stream.Collectors;
+import org.junit.After;
+import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
public class UnaryMetricsMetadataIT {
@ClassRule public static TestEnvRule testEnvRule = new TestEnvRule();
- @BeforeClass
- public static void setUpClass() {
+ private BigtableDataClient client;
+ private InMemoryMetricReader metricReader;
+
+ @Before
+ public void setup() throws IOException {
assume()
.withMessage("UnaryMetricsMetadataIT is not supported on Emulator")
.that(testEnvRule.env())
.isNotInstanceOf(EmulatorEnv.class);
- BuiltinViews.registerBigtableBuiltinViews();
+
+ BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder();
+
+ metricReader = InMemoryMetricReader.create();
+
+ SdkMeterProviderBuilder meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(metricReader);
+ BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider);
+ OpenTelemetry openTelemetry =
+ OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+
+ settings.setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry));
+
+ client = BigtableDataClient.create(settings.build());
+ }
+
+ @After
+ public void tearDown() throws IOException {
+ if (client != null) {
+ client.close();
+ }
}
@Test
@@ -52,9 +93,7 @@ public void testSuccess() throws Exception {
String familyId = testEnvRule.env().getFamilyId();
ApiFuture future =
- testEnvRule
- .env()
- .getDataClient()
+ client
.mutateRowCallable()
.futureCall(
RowMutation.create(testEnvRule.env().getTableId(), rowKey)
@@ -69,18 +108,36 @@ public void testSuccess() throws Exception {
.listClustersAsync(testEnvRule.env().getInstanceId());
List clusters = clustersFuture.get(1, TimeUnit.MINUTES);
- // give opencensus some time to populate view data
- for (int i = 0; i < 10; i++) {
- if (StatsWrapper.getOperationLatencyViewTagValueStrings()
- .contains(clusters.get(0).getZone())) {
- break;
- }
- Thread.sleep(100);
- }
+ Collection allMetricData = metricReader.collectAllMetrics();
+ List metrics =
+ allMetricData.stream()
+ .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME))
+ .collect(Collectors.toList());
+
+ assertThat(allMetricData)
+ .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
+ .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME);
+ assertThat(metrics).hasSize(1);
+
+ MetricData metricData = metrics.get(0);
+ List pointData = new ArrayList<>(metricData.getData().getPoints());
+ List clusterAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
+ .collect(Collectors.toList());
+ List zoneAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
+ .collect(Collectors.toList());
- List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings();
- assertThat(tagValueStrings).contains(clusters.get(0).getZone());
- assertThat(tagValueStrings).contains(clusters.get(0).getId());
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS)
+ .contains(clusters.get(0).getId());
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS)
+ .contains(clusters.get(0).getZone());
+ assertThat(clusterAttributes).contains(clusters.get(0).getId());
+ assertThat(zoneAttributes).contains(clusters.get(0).getZone());
}
@Test
@@ -89,9 +146,7 @@ public void testFailure() throws Exception {
String familyId = testEnvRule.env().getFamilyId();
ApiFuture future =
- testEnvRule
- .env()
- .getDataClient()
+ client
.mutateRowCallable()
.futureCall(
RowMutation.create("non-exist-table", rowKey).setCell(familyId, "q", "myVal"));
@@ -106,16 +161,39 @@ public void testFailure() throws Exception {
}
}
- // give opencensus some time to populate view data
- for (int i = 0; i < 10; i++) {
- if (StatsWrapper.getOperationLatencyViewTagValueStrings().contains("unspecified")) {
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = null;
+ for (MetricData md : allMetricData) {
+ if (md.getName()
+ .equals(
+ BuiltinMetricsConstants.METER_NAME
+ + BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME)) {
+ metricData = md;
break;
}
- Thread.sleep(100);
}
- List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings();
- assertThat(tagValueStrings).contains("unspecified");
- assertThat(tagValueStrings).contains("global");
+ assertThat(allMetricData)
+ .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
+ .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME);
+ assertThat(metricData).isNotNull();
+
+ List pointData = new ArrayList<>(metricData.getData().getPoints());
+
+ assertThat(pointData)
+ .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS)
+ .contains("unspecified");
+ assertThat(pointData).comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS).contains("global");
+ List clusterAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
+ .collect(Collectors.toList());
+ List zoneAttributes =
+ pointData.stream()
+ .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
+ .collect(Collectors.toList());
+
+ assertThat(clusterAttributes).contains("unspecified");
+ assertThat(zoneAttributes).contains("global");
}
}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java
index 79cbccb0ac..290fcc321f 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java
@@ -885,6 +885,7 @@ public void enableRetryInfoFalseValueTest() throws IOException {
"generateInitialChangeStreamPartitionsSettings",
"readChangeStreamSettings",
"pingAndWarmSettings",
+ "metricsProvider",
};
@Test
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java
index 1975d0da25..abbf46c468 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java
@@ -18,12 +18,9 @@
import static com.google.common.truth.Truth.assertThat;
import static org.junit.Assert.assertThrows;
-import com.google.api.gax.core.NoCredentialsProvider;
import com.google.api.gax.grpc.GrpcStatusCode;
-import com.google.api.gax.grpc.GrpcTransportChannel;
import com.google.api.gax.rpc.ApiException;
import com.google.api.gax.rpc.ErrorDetails;
-import com.google.api.gax.rpc.FixedTransportChannelProvider;
import com.google.api.gax.rpc.InternalException;
import com.google.api.gax.rpc.UnavailableException;
import com.google.bigtable.v2.BigtableGrpc;
@@ -45,6 +42,7 @@
import com.google.bigtable.v2.SampleRowKeysResponse;
import com.google.cloud.bigtable.data.v2.BigtableDataClient;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
+import com.google.cloud.bigtable.data.v2.FakeServiceBuilder;
import com.google.cloud.bigtable.data.v2.models.BulkMutation;
import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation;
import com.google.cloud.bigtable.data.v2.models.Filters;
@@ -55,22 +53,31 @@
import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;
+import com.google.cloud.bigtable.data.v2.models.TableId;
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Queues;
import com.google.protobuf.Any;
import com.google.rpc.RetryInfo;
+import io.grpc.ForwardingServerCall;
import io.grpc.Metadata;
+import io.grpc.MethodDescriptor;
+import io.grpc.Server;
+import io.grpc.ServerCall;
+import io.grpc.ServerCallHandler;
+import io.grpc.ServerInterceptor;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.grpc.stub.StreamObserver;
-import io.grpc.testing.GrpcServerRule;
import java.io.IOException;
import java.time.Duration;
+import java.util.HashSet;
import java.util.Queue;
+import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import org.junit.After;
import org.junit.Before;
-import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
@@ -78,12 +85,13 @@
@RunWith(JUnit4.class)
public class RetryInfoTest {
- @Rule public GrpcServerRule serverRule = new GrpcServerRule();
-
private static final Metadata.Key ERROR_DETAILS_KEY =
Metadata.Key.of("grpc-status-details-bin", Metadata.BINARY_BYTE_MARSHALLER);
+ private final Set methods = new HashSet<>();
+
private FakeBigtableService service;
+ private Server server;
private BigtableDataClient client;
private BigtableDataSettings.Builder settings;
@@ -94,29 +102,111 @@ public class RetryInfoTest {
@Before
public void setUp() throws IOException {
service = new FakeBigtableService();
- serverRule.getServiceRegistry().addService(service);
+
+ ServerInterceptor serverInterceptor =
+ new ServerInterceptor() {
+ @Override
+ public ServerCall.Listener interceptCall(
+ ServerCall serverCall,
+ Metadata metadata,
+ ServerCallHandler serverCallHandler) {
+ return serverCallHandler.startCall(
+ new ForwardingServerCall.SimpleForwardingServerCall(serverCall) {
+ @Override
+ public void close(Status status, Metadata trailers) {
+ if (trailers.containsKey(ERROR_DETAILS_KEY)) {
+ methods.add(serverCall.getMethodDescriptor().getBareMethodName());
+ }
+ super.close(status, trailers);
+ }
+ },
+ metadata);
+ }
+ };
+ server = FakeServiceBuilder.create(service).intercept(serverInterceptor).start();
settings =
- BigtableDataSettings.newBuilder()
+ BigtableDataSettings.newBuilderForEmulator(server.getPort())
.setProjectId("fake-project")
- .setInstanceId("fake-instance")
- .setCredentialsProvider(NoCredentialsProvider.create());
-
- settings
- .stubSettings()
- .setTransportChannelProvider(
- FixedTransportChannelProvider.create(
- GrpcTransportChannel.create(serverRule.getChannel())))
- // channel priming doesn't work with FixedTransportChannelProvider. Disable it for the test
- .setRefreshingChannel(false)
- .build();
+ .setInstanceId("fake-instance");
this.client = BigtableDataClient.create(settings.build());
}
+ @After
+ public void tearDown() {
+ if (client != null) {
+ client.close();
+ }
+ if (server != null) {
+ server.shutdown();
+ }
+ }
+
@Test
- public void testReadRow() {
- verifyRetryInfoIsUsed(() -> client.readRow("table", "row"), true);
+ public void testAllMethods() {
+ // Verify retry info is handled correctly for all the methods in the data API.
+ verifyRetryInfoIsUsed(() -> client.readRow(TableId.of("table"), "row"), true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(
+ () -> client.readRows(Query.create(TableId.of("table"))).iterator().hasNext(), true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(
+ () ->
+ client.bulkMutateRows(
+ BulkMutation.create(TableId.of("fake-table"))
+ .add(RowMutationEntry.create("row-key-1").setCell("cf", "q", "v"))),
+ true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(
+ () ->
+ client.mutateRow(
+ RowMutation.create(TableId.of("fake-table"), "key").setCell("cf", "q", "v")),
+ true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(() -> client.sampleRowKeys(TableId.of("table")), true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(
+ () ->
+ client.checkAndMutateRow(
+ ConditionalRowMutation.create("table", "key")
+ .condition(Filters.FILTERS.value().regex("old-value"))
+ .then(Mutation.create().setCell("cf", "q", "v"))),
+ true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(
+ () ->
+ client.readModifyWriteRow(
+ ReadModifyWriteRow.create("table", "row").append("cf", "q", "v")),
+ true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(
+ () -> client.readChangeStream(ReadChangeStreamQuery.create("table")).iterator().hasNext(),
+ true);
+
+ attemptCounter.set(0);
+ verifyRetryInfoIsUsed(
+ () -> client.generateInitialChangeStreamPartitions("table").iterator().hasNext(), true);
+
+ // Verify that every data API method is either exercised above or explicitly
+ // excluded. This is enforced by introspecting the
+ // grpc service's method descriptors.
+ Set expected =
+ BigtableGrpc.getServiceDescriptor().getMethods().stream()
+ .map(MethodDescriptor::getBareMethodName)
+ .collect(Collectors.toSet());
+
+ // Account for methods that intentionally don't support retry info by adding them to the observed set.
+ methods.add("PingAndWarm");
+
+ assertThat(methods).containsExactlyElementsIn(expected);
}
@Test
@@ -147,11 +237,6 @@ public void testReadRowServerNotReturningRetryInfoClientDisabledHandling() throw
}
}
- @Test
- public void testReadRows() {
- verifyRetryInfoIsUsed(() -> client.readRows(Query.create("table")).iterator().hasNext(), true);
- }
-
@Test
public void testReadRowsNonRetraybleErrorWithRetryInfo() {
verifyRetryInfoIsUsed(() -> client.readRows(Query.create("table")).iterator().hasNext(), false);
@@ -181,16 +266,6 @@ public void testReadRowsServerNotReturningRetryInfoClientDisabledHandling() thro
}
}
- @Test
- public void testMutateRows() {
- verifyRetryInfoIsUsed(
- () ->
- client.bulkMutateRows(
- BulkMutation.create("fake-table")
- .add(RowMutationEntry.create("row-key-1").setCell("cf", "q", "v"))),
- true);
- }
-
@Test
public void testMutateRowsNonRetryableErrorWithRetryInfo() {
verifyRetryInfoIsUsed(
@@ -238,12 +313,6 @@ public void testMutateRowsServerNotReturningRetryInfoClientDisabledHandling() th
}
}
- @Test
- public void testMutateRow() {
- verifyRetryInfoIsUsed(
- () -> client.mutateRow(RowMutation.create("table", "key").setCell("cf", "q", "v")), true);
- }
-
@Test
public void testMutateRowNonRetryableErrorWithRetryInfo() {
verifyRetryInfoIsUsed(
@@ -278,11 +347,6 @@ public void testMutateRowServerNotReturningRetryInfoClientDisabledHandling() thr
}
}
- @Test
- public void testSampleRowKeys() {
- verifyRetryInfoIsUsed(() -> client.sampleRowKeys("table"), true);
- }
-
@Test
public void testSampleRowKeysNonRetryableErrorWithRetryInfo() {
verifyRetryInfoIsUsed(() -> client.sampleRowKeys("table"), false);
@@ -312,17 +376,6 @@ public void testSampleRowKeysServerNotReturningRetryInfoClientDisabledHandling()
}
}
- @Test
- public void testCheckAndMutateRow() {
- verifyRetryInfoIsUsed(
- () ->
- client.checkAndMutateRow(
- ConditionalRowMutation.create("table", "key")
- .condition(Filters.FILTERS.value().regex("old-value"))
- .then(Mutation.create().setCell("cf", "q", "v"))),
- true);
- }
-
@Test
public void testCheckAndMutateDisableRetryInfo() throws IOException {
settings.stubSettings().setEnableRetryInfo(false);
@@ -368,15 +421,6 @@ public void testCheckAndMutateServerNotReturningRetryInfoClientDisabledHandling(
}
}
- @Test
- public void testReadModifyWrite() {
- verifyRetryInfoIsUsed(
- () ->
- client.readModifyWriteRow(
- ReadModifyWriteRow.create("table", "row").append("cf", "q", "v")),
- true);
- }
-
@Test
public void testReadModifyWriteDisableRetryInfo() throws IOException {
settings.stubSettings().setEnableRetryInfo(false);
@@ -414,13 +458,6 @@ public void testReadModifyWriteNotReturningRetryInfoClientDisabledHandling() thr
}
}
- @Test
- public void testReadChangeStream() {
- verifyRetryInfoIsUsed(
- () -> client.readChangeStream(ReadChangeStreamQuery.create("table")).iterator().hasNext(),
- true);
- }
-
@Test
public void testReadChangeStreamNonRetryableErrorWithRetryInfo() {
verifyRetryInfoIsUsed(
@@ -465,12 +502,6 @@ public void testReadChangeStreamNotReturningRetryInfoClientDisabledHandling() th
}
}
- @Test
- public void testGenerateInitialChangeStreamPartition() {
- verifyRetryInfoIsUsed(
- () -> client.generateInitialChangeStreamPartitions("table").iterator().hasNext(), true);
- }
-
@Test
public void testGenerateInitialChangeStreamPartitionNonRetryableError() {
verifyRetryInfoIsUsed(
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java
new file mode 100644
index 0000000000..a0b9c058dc
--- /dev/null
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java
@@ -0,0 +1,310 @@
+/*
+ * Copyright 2023 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APP_PROFILE_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_UID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY;
+import static com.google.common.truth.Truth.assertThat;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import com.google.api.Distribution;
+import com.google.api.MonitoredResource;
+import com.google.api.core.ApiFuture;
+import com.google.api.core.ApiFutures;
+import com.google.api.gax.rpc.UnaryCallable;
+import com.google.cloud.monitoring.v3.MetricServiceClient;
+import com.google.cloud.monitoring.v3.stub.MetricServiceStub;
+import com.google.common.collect.ImmutableList;
+import com.google.monitoring.v3.CreateTimeSeriesRequest;
+import com.google.monitoring.v3.TimeSeries;
+import com.google.protobuf.Empty;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.common.InstrumentationScopeInfo;
+import io.opentelemetry.sdk.metrics.data.AggregationTemporality;
+import io.opentelemetry.sdk.metrics.data.HistogramPointData;
+import io.opentelemetry.sdk.metrics.data.LongPointData;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramData;
+import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramPointData;
+import io.opentelemetry.sdk.metrics.internal.data.ImmutableLongPointData;
+import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData;
+import io.opentelemetry.sdk.metrics.internal.data.ImmutableSumData;
+import io.opentelemetry.sdk.resources.Resource;
+import java.util.Arrays;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnit;
+import org.mockito.junit.MockitoRule;
+
+public class BigtableCloudMonitoringExporterTest {
+ private static final String projectId = "fake-project";
+ private static final String instanceId = "fake-instance";
+ private static final String appProfileId = "default";
+ private static final String tableId = "fake-table";
+ private static final String zone = "us-east-1";
+ private static final String cluster = "cluster-1";
+
+ private static final String clientName = "fake-client-name";
+ private static final String taskId = "fake-task-id";
+
+ @Rule public final MockitoRule mockitoRule = MockitoJUnit.rule();
+
+ @Mock private MetricServiceStub mockMetricServiceStub;
+ private MetricServiceClient fakeMetricServiceClient;
+ private BigtableCloudMonitoringExporter exporter;
+
+ private Attributes attributes;
+ private Resource resource;
+ private InstrumentationScopeInfo scope;
+
+ @Before
+ public void setUp() {
+ fakeMetricServiceClient = new FakeMetricServiceClient(mockMetricServiceStub);
+
+ exporter =
+ new BigtableCloudMonitoringExporter(
+ projectId, fakeMetricServiceClient, /* applicationResource= */ null, taskId);
+
+ attributes =
+ Attributes.builder()
+ .put(BIGTABLE_PROJECT_ID_KEY, projectId)
+ .put(INSTANCE_ID_KEY, instanceId)
+ .put(TABLE_ID_KEY, tableId)
+ .put(CLUSTER_ID_KEY, cluster)
+ .put(ZONE_ID_KEY, zone)
+ .put(APP_PROFILE_KEY, appProfileId)
+ .build();
+
+ resource = Resource.create(Attributes.empty());
+
+ scope = InstrumentationScopeInfo.create(BuiltinMetricsConstants.METER_NAME);
+ }
+
+ @After
+ public void tearDown() {}
+
+ @Test
+ public void testExportingSumData() {
+ ArgumentCaptor<CreateTimeSeriesRequest> argumentCaptor =
+ ArgumentCaptor.forClass(CreateTimeSeriesRequest.class);
+
+ UnaryCallable<CreateTimeSeriesRequest, Empty> mockCallable = mock(UnaryCallable.class);
+ when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable);
+ ApiFuture<Empty> future = ApiFutures.immediateFuture(Empty.getDefaultInstance());
+ when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future);
+
+ long fakeValue = 11L;
+
+ long startEpoch = 10;
+ long endEpoch = 15;
+ LongPointData longPointData =
+ ImmutableLongPointData.create(startEpoch, endEpoch, attributes, fakeValue);
+
+ MetricData longData =
+ ImmutableMetricData.createLongSum(
+ resource,
+ scope,
+ "bigtable.googleapis.com/internal/client/retry_count",
+ "description",
+ "1",
+ ImmutableSumData.create(
+ true, AggregationTemporality.CUMULATIVE, ImmutableList.of(longPointData)));
+
+ exporter.export(Arrays.asList(longData));
+
+ CreateTimeSeriesRequest request = argumentCaptor.getValue();
+
+ assertThat(request.getTimeSeriesList()).hasSize(1);
+
+ TimeSeries timeSeries = request.getTimeSeriesList().get(0);
+
+ assertThat(timeSeries.getResource().getLabelsMap())
+ .containsExactly(
+ BIGTABLE_PROJECT_ID_KEY.getKey(), projectId,
+ INSTANCE_ID_KEY.getKey(), instanceId,
+ TABLE_ID_KEY.getKey(), tableId,
+ CLUSTER_ID_KEY.getKey(), cluster,
+ ZONE_ID_KEY.getKey(), zone);
+
+ assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2);
+ assertThat(timeSeries.getMetric().getLabelsMap())
+ .containsAtLeast(APP_PROFILE_KEY.getKey(), appProfileId, CLIENT_UID_KEY.getKey(), taskId);
+ assertThat(timeSeries.getPoints(0).getValue().getInt64Value()).isEqualTo(fakeValue);
+ assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos())
+ .isEqualTo(startEpoch);
+ assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch);
+ }
+
+ @Test
+ public void testExportingHistogramData() {
+ ArgumentCaptor<CreateTimeSeriesRequest> argumentCaptor =
+ ArgumentCaptor.forClass(CreateTimeSeriesRequest.class);
+
+ UnaryCallable<CreateTimeSeriesRequest, Empty> mockCallable = mock(UnaryCallable.class);
+ when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable);
+ ApiFuture<Empty> future = ApiFutures.immediateFuture(Empty.getDefaultInstance());
+ when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future);
+
+ long startEpoch = 10;
+ long endEpoch = 15;
+ HistogramPointData histogramPointData =
+ ImmutableHistogramPointData.create(
+ startEpoch,
+ endEpoch,
+ attributes,
+ 3d,
+ true,
+ 1d, // min
+ true,
+ 2d, // max
+ Arrays.asList(1.0),
+ Arrays.asList(1L, 2L));
+
+ MetricData histogramData =
+ ImmutableMetricData.createDoubleHistogram(
+ resource,
+ scope,
+ "bigtable.googleapis.com/internal/client/operation_latencies",
+ "description",
+ "ms",
+ ImmutableHistogramData.create(
+ AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData)));
+
+ exporter.export(Arrays.asList(histogramData));
+
+ CreateTimeSeriesRequest request = argumentCaptor.getValue();
+
+ assertThat(request.getTimeSeriesList()).hasSize(1);
+
+ TimeSeries timeSeries = request.getTimeSeriesList().get(0);
+
+ assertThat(timeSeries.getResource().getLabelsMap())
+ .containsExactly(
+ BIGTABLE_PROJECT_ID_KEY.getKey(), projectId,
+ INSTANCE_ID_KEY.getKey(), instanceId,
+ TABLE_ID_KEY.getKey(), tableId,
+ CLUSTER_ID_KEY.getKey(), cluster,
+ ZONE_ID_KEY.getKey(), zone);
+
+ assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2);
+ assertThat(timeSeries.getMetric().getLabelsMap())
+ .containsAtLeast(APP_PROFILE_KEY.getKey(), appProfileId, CLIENT_UID_KEY.getKey(), taskId);
+ Distribution distribution = timeSeries.getPoints(0).getValue().getDistributionValue();
+ assertThat(distribution.getCount()).isEqualTo(3);
+ assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos())
+ .isEqualTo(startEpoch);
+ assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch);
+ }
+
+ @Test
+ public void testTimeSeriesForMetricWithGceOrGkeResource() {
+ String gceProjectId = "fake-gce-project";
+ BigtableCloudMonitoringExporter exporter =
+ new BigtableCloudMonitoringExporter(
+ projectId,
+ fakeMetricServiceClient,
+ MonitoredResource.newBuilder()
+ .setType("gce-instance")
+ .putLabels("some-gce-key", "some-gce-value")
+ .putLabels("project_id", gceProjectId)
+ .build(),
+ taskId);
+ ArgumentCaptor<CreateTimeSeriesRequest> argumentCaptor =
+ ArgumentCaptor.forClass(CreateTimeSeriesRequest.class);
+
+ UnaryCallable<CreateTimeSeriesRequest, Empty> mockCallable = mock(UnaryCallable.class);
+ when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable);
+ ApiFuture<Empty> future = ApiFutures.immediateFuture(Empty.getDefaultInstance());
+ when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future);
+
+ long startEpoch = 10;
+ long endEpoch = 15;
+ HistogramPointData histogramPointData =
+ ImmutableHistogramPointData.create(
+ startEpoch,
+ endEpoch,
+ Attributes.of(
+ BIGTABLE_PROJECT_ID_KEY,
+ projectId,
+ INSTANCE_ID_KEY,
+ instanceId,
+ APP_PROFILE_KEY,
+ appProfileId,
+ CLIENT_NAME_KEY,
+ clientName),
+ 3d,
+ true,
+ 1d, // min
+ true,
+ 2d, // max
+ Arrays.asList(1.0),
+ Arrays.asList(1L, 2L));
+
+ MetricData histogramData =
+ ImmutableMetricData.createDoubleHistogram(
+ resource,
+ scope,
+ "bigtable.googleapis.com/internal/client/per_connection_error_count",
+ "description",
+ "ms",
+ ImmutableHistogramData.create(
+ AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData)));
+
+ exporter.export(Arrays.asList(histogramData));
+
+ CreateTimeSeriesRequest request = argumentCaptor.getValue();
+
+ assertThat(request.getName()).isEqualTo("projects/" + gceProjectId);
+ assertThat(request.getTimeSeriesList()).hasSize(1);
+
+ com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0);
+
+ assertThat(timeSeries.getResource().getLabelsMap())
+ .containsExactly("some-gce-key", "some-gce-value", "project_id", gceProjectId);
+
+ assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5);
+ assertThat(timeSeries.getMetric().getLabelsMap())
+ .containsAtLeast(
+ BIGTABLE_PROJECT_ID_KEY.getKey(),
+ projectId,
+ INSTANCE_ID_KEY.getKey(),
+ instanceId,
+ APP_PROFILE_KEY.getKey(),
+ appProfileId,
+ CLIENT_NAME_KEY.getKey(),
+ clientName,
+ CLIENT_UID_KEY.getKey(),
+ taskId);
+ }
+
+ private static class FakeMetricServiceClient extends MetricServiceClient {
+
+ protected FakeMetricServiceClient(MetricServiceStub stub) {
+ super(stub);
+ }
+ }
+}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java
index 5d16b623fd..a12dd3cfbd 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java
@@ -45,7 +45,6 @@
import com.google.cloud.bigtable.data.v2.models.SampleRowKeysRequest;
import com.google.cloud.bigtable.data.v2.models.TableId;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
-import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
import com.google.common.collect.ImmutableMap;
import io.grpc.ForwardingServerCall.SimpleForwardingServerCall;
import io.grpc.Metadata;
@@ -126,16 +125,21 @@ public void sendHeaders(Metadata headers) {
.setInstanceId(INSTANCE_ID)
.setAppProfileId(APP_PROFILE_ID)
.build();
- EnhancedBigtableStubSettings stubSettings =
- settings
- .getStubSettings()
+
+ ClientContext clientContext =
+ EnhancedBigtableStub.createClientContext(settings.getStubSettings());
+ clientContext =
+ clientContext
.toBuilder()
.setTracerFactory(
EnhancedBigtableStub.createBigtableTracerFactory(
- settings.getStubSettings(), Tags.getTagger(), localStats.getStatsRecorder()))
+ settings.getStubSettings(),
+ Tags.getTagger(),
+ localStats.getStatsRecorder(),
+ null))
.build();
- attempts = stubSettings.readRowsSettings().getRetrySettings().getMaxAttempts();
- stub = new EnhancedBigtableStub(stubSettings, ClientContext.create(stubSettings));
+ attempts = settings.getStubSettings().readRowsSettings().getRetrySettings().getMaxAttempts();
+ stub = new EnhancedBigtableStub(settings.getStubSettings(), clientContext);
// Create another server without injecting the server-timing header and another stub that
// connects to it.
@@ -147,18 +151,21 @@ public void sendHeaders(Metadata headers) {
.setInstanceId(INSTANCE_ID)
.setAppProfileId(APP_PROFILE_ID)
.build();
- EnhancedBigtableStubSettings noHeaderStubSettings =
- noHeaderSettings
- .getStubSettings()
+
+ ClientContext noHeaderClientContext =
+ EnhancedBigtableStub.createClientContext(noHeaderSettings.getStubSettings());
+ noHeaderClientContext =
+ noHeaderClientContext
.toBuilder()
.setTracerFactory(
EnhancedBigtableStub.createBigtableTracerFactory(
noHeaderSettings.getStubSettings(),
Tags.getTagger(),
- localStats.getStatsRecorder()))
+ localStats.getStatsRecorder(),
+ null))
.build();
noHeaderStub =
- new EnhancedBigtableStub(noHeaderStubSettings, ClientContext.create(noHeaderStubSettings));
+ new EnhancedBigtableStub(noHeaderSettings.getStubSettings(), noHeaderClientContext);
}
@After
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java
new file mode 100644
index 0000000000..09b7e1f663
--- /dev/null
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java
@@ -0,0 +1,112 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package com.google.cloud.bigtable.data.v2.stub.metrics;
+
+import static com.google.common.truth.Truth.assertThat;
+
+import com.google.api.core.InternalApi;
+import com.google.protobuf.Timestamp;
+import com.google.protobuf.util.Timestamps;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.metrics.data.HistogramPointData;
+import io.opentelemetry.sdk.metrics.data.LongPointData;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import java.util.Collection;
+import java.util.List;
+import java.util.stream.Collectors;
+import org.junit.Assert;
+
+@InternalApi
+public class BuiltinMetricsTestUtils {
+
+ private BuiltinMetricsTestUtils() {}
+
+ public static MetricData getMetricData(Collection<MetricData> allMetricData, String metricName) {
+ List<MetricData> metricDataList =
+ allMetricData.stream()
+ .filter(md -> md.getName().equals(BuiltinMetricsConstants.METER_NAME + metricName))
+ .collect(Collectors.toList());
+ if (metricDataList.size() == 0) {
+ allMetricData.stream().forEach(md -> System.out.println(md.getName()));
+ }
+ assertThat(metricDataList.size()).isEqualTo(1);
+
+ return metricDataList.get(0);
+ }
+
+ public static long getAggregatedValue(MetricData metricData, Attributes attributes) {
+ switch (metricData.getType()) {
+ case HISTOGRAM:
+ HistogramPointData hd =
+ metricData.getHistogramData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList())
+ .get(0);
+ return (long) hd.getSum() / hd.getCount();
+ case LONG_SUM:
+ LongPointData ld =
+ metricData.getLongSumData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList())
+ .get(0);
+ return ld.getValue();
+ default:
+ return 0;
+ }
+ }
+
+ public static Timestamp getStartTimeSeconds(MetricData metricData, Attributes attributes) {
+ switch (metricData.getType()) {
+ case HISTOGRAM:
+ HistogramPointData hd =
+ metricData.getHistogramData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList())
+ .get(0);
+ return Timestamps.fromNanos(hd.getStartEpochNanos());
+ case LONG_SUM:
+ LongPointData ld =
+ metricData.getLongSumData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList())
+ .get(0);
+ return Timestamps.fromNanos(ld.getStartEpochNanos());
+ default:
+ return Timestamp.getDefaultInstance();
+ }
+ }
+
+ public static void verifyAttributes(MetricData metricData, Attributes attributes) {
+ switch (metricData.getType()) {
+ case HISTOGRAM:
+ List<HistogramPointData> hd =
+ metricData.getHistogramData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList());
+ assertThat(hd).isNotEmpty();
+ break;
+ case LONG_SUM:
+ List<LongPointData> ld =
+ metricData.getLongSumData().getPoints().stream()
+ .filter(pd -> pd.getAttributes().equals(attributes))
+ .collect(Collectors.toList());
+ assertThat(ld).isNotEmpty();
+ break;
+ default:
+ Assert.fail("Unexpected type");
+ }
+ }
+}
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java
index 06b923cad3..2dd4bcabb3 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java
@@ -15,14 +15,24 @@
*/
package com.google.cloud.bigtable.data.v2.stub.metrics;
-import static com.google.api.gax.tracing.ApiTracerFactory.OperationType;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METHOD_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STATUS_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STREAMING_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getAggregatedValue;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getMetricData;
+import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.verifyAttributes;
import static com.google.common.truth.Truth.assertThat;
-import static org.junit.Assert.assertThrows;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.timeout;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
import com.google.api.client.util.Lists;
import com.google.api.core.ApiFunction;
@@ -36,7 +46,6 @@
import com.google.api.gax.rpc.NotFoundException;
import com.google.api.gax.rpc.ResponseObserver;
import com.google.api.gax.rpc.StreamController;
-import com.google.api.gax.tracing.SpanName;
import com.google.bigtable.v2.BigtableGrpc;
import com.google.bigtable.v2.MutateRowRequest;
import com.google.bigtable.v2.MutateRowResponse;
@@ -45,6 +54,7 @@
import com.google.bigtable.v2.ReadRowsRequest;
import com.google.bigtable.v2.ReadRowsResponse;
import com.google.bigtable.v2.ResponseParams;
+import com.google.cloud.bigtable.Version;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.FakeServiceBuilder;
import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId;
@@ -52,9 +62,9 @@
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;
+import com.google.cloud.bigtable.data.v2.models.TableId;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
-import com.google.cloud.bigtable.stats.StatsRecorderWrapper;
import com.google.common.base.Stopwatch;
import com.google.common.collect.Range;
import com.google.protobuf.ByteString;
@@ -77,11 +87,21 @@
import io.grpc.StatusRuntimeException;
import io.grpc.stub.ServerCallStreamObserver;
import io.grpc.stub.StreamObserver;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.InstrumentSelector;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.View;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
import java.nio.charset.Charset;
import java.util.ArrayList;
+import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
@@ -92,12 +112,8 @@
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Captor;
-import org.mockito.Mock;
import org.mockito.junit.MockitoJUnit;
import org.mockito.junit.MockitoRule;
-import org.mockito.stubbing.Answer;
import org.threeten.bp.Duration;
@RunWith(JUnit4.class)
@@ -105,8 +121,8 @@ public class BuiltinMetricsTracerTest {
private static final String PROJECT_ID = "fake-project";
private static final String INSTANCE_ID = "fake-instance";
private static final String APP_PROFILE_ID = "default";
- private static final String TABLE_ID = "fake-table";
- private static final String AUTHORIZED_VIEW_ID = "fake-authorized-view";
+ private static final String TABLE = "fake-table";
+
private static final String BAD_TABLE_ID = "non-exist-table";
private static final String ZONE = "us-west-1";
private static final String CLUSTER = "cluster-0";
@@ -114,6 +130,7 @@ public class BuiltinMetricsTracerTest {
private static final long SERVER_LATENCY = 100;
private static final long APPLICATION_LATENCY = 200;
private static final long SLEEP_VARIABILITY = 15;
+ private static final String CLIENT_NAME = "java-bigtable/" + Version.VERSION;
private static final long CHANNEL_BLOCKING_LATENCY = 75;
@@ -124,18 +141,35 @@ public class BuiltinMetricsTracerTest {
private EnhancedBigtableStub stub;
- @Mock private BuiltinMetricsTracerFactory mockFactory;
- @Mock private StatsRecorderWrapper statsRecorderWrapper;
+ private int batchElementCount = 2;
- @Captor private ArgumentCaptor status;
- @Captor private ArgumentCaptor tableId;
- @Captor private ArgumentCaptor zone;
- @Captor private ArgumentCaptor cluster;
+ private Attributes baseAttributes;
- private int batchElementCount = 2;
+ private InMemoryMetricReader metricReader;
@Before
public void setUp() throws Exception {
+ metricReader = InMemoryMetricReader.create();
+
+ baseAttributes =
+ Attributes.builder()
+ .put(BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY, PROJECT_ID)
+ .put(BuiltinMetricsConstants.INSTANCE_ID_KEY, INSTANCE_ID)
+ .put(BuiltinMetricsConstants.APP_PROFILE_KEY, APP_PROFILE_ID)
+ .build();
+
+ SdkMeterProviderBuilder meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(metricReader);
+
+ for (Map.Entry<InstrumentSelector, View> entry :
+ BuiltinMetricsConstants.getAllViews().entrySet()) {
+ meterProvider.registerView(entry.getKey(), entry.getValue());
+ }
+
+ OpenTelemetrySdk otel =
+ OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+ BuiltinMetricsTracerFactory factory = BuiltinMetricsTracerFactory.create(otel, baseAttributes);
+
+ // Add an interceptor to add server-timing in headers
ServerInterceptor trailersInterceptor =
new ServerInterceptor() {
@@ -216,7 +250,8 @@ public void sendMessage(ReqT message) {
.setMaxOutstandingRequestBytes(1001L)
.build())
.build());
- stubSettingsBuilder.setTracerFactory(mockFactory);
+
+ stubSettingsBuilder.setTracerFactory(factory);
InstantiatingGrpcChannelProvider.Builder channelProvider =
((InstantiatingGrpcChannelProvider) stubSettingsBuilder.getTransportChannelProvider())
@@ -247,117 +282,117 @@ public void tearDown() {
@Test
public void testReadRowsOperationLatencies() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
- ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class);
-
Stopwatch stopwatch = Stopwatch.createStarted();
- Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE_ID)).iterator());
+ Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE)).iterator());
long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS);
- verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture());
- // verify record operation is only called once
- verify(statsRecorderWrapper)
- .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(STREAMING_KEY, true)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
+
+ Collection<MetricData> allMetricData = metricReader.collectAllMetrics();
+
+ MetricData metricData = getMetricData(allMetricData, OPERATION_LATENCIES_NAME);
- assertThat(operationLatency.getValue()).isIn(Range.closed(SERVER_LATENCY, elapsed));
- assertThat(status.getAllValues()).containsExactly("OK");
- assertThat(tableId.getAllValues()).containsExactly(TABLE_ID);
- assertThat(zone.getAllValues()).containsExactly(ZONE);
- assertThat(cluster.getAllValues()).containsExactly(CLUSTER);
+ long value = getAggregatedValue(metricData, expectedAttributes);
+ assertThat(value).isIn(Range.closed(SERVER_LATENCY, elapsed));
}
@Test
public void testReadRowsOperationLatenciesOnAuthorizedView() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
- ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class);
-
+ String authorizedViewId = "test-authorized-view-id";
Stopwatch stopwatch = Stopwatch.createStarted();
Lists.newArrayList(
- stub.readRowsCallable()
- .call(Query.create(AuthorizedViewId.of(TABLE_ID, AUTHORIZED_VIEW_ID)))
- .iterator());
+ stub.readRowsCallable().call(Query.create(AuthorizedViewId.of(TABLE, authorizedViewId))));
long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS);
- verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture());
- // verify record operation is only called once
- verify(statsRecorderWrapper)
- .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(STREAMING_KEY, true)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
- assertThat(operationLatency.getValue()).isIn(Range.closed(SERVER_LATENCY, elapsed));
- assertThat(status.getAllValues()).containsExactly("OK");
- assertThat(tableId.getAllValues()).containsExactly(TABLE_ID);
- assertThat(zone.getAllValues()).containsExactly(ZONE);
- assertThat(cluster.getAllValues()).containsExactly(CLUSTER);
+ Collection<MetricData> allMetricData = metricReader.collectAllMetrics();
+
+ MetricData metricData = getMetricData(allMetricData, OPERATION_LATENCIES_NAME);
+ long value = getAggregatedValue(metricData, expectedAttributes);
+ assertThat(value).isIn(Range.closed(SERVER_LATENCY, elapsed));
}
@Test
public void testGfeMetrics() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
- ArgumentCaptor gfeLatency = ArgumentCaptor.forClass(Long.class);
- ArgumentCaptor gfeMissingHeaders = ArgumentCaptor.forClass(Long.class);
-
- Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE_ID)));
-
- // Verify record attempt are called multiple times
- verify(statsRecorderWrapper, times(fakeService.getAttemptCounter().get()))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
-
- // The request was retried and gfe latency is only recorded in the retry attempt
- verify(statsRecorderWrapper).putGfeLatencies(gfeLatency.capture());
- assertThat(gfeLatency.getValue()).isEqualTo(FAKE_SERVER_TIMING);
-
- // The first time the request was retried, it'll increment missing header counter
- verify(statsRecorderWrapper, times(fakeService.getAttemptCounter().get()))
- .putGfeMissingHeaders(gfeMissingHeaders.capture());
- assertThat(gfeMissingHeaders.getAllValues()).containsExactly(1L, 0L);
-
- assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "OK");
- assertThat(tableId.getAllValues()).containsExactly(TABLE_ID, TABLE_ID);
- assertThat(zone.getAllValues()).containsExactly("global", ZONE);
- assertThat(cluster.getAllValues()).containsExactly("unspecified", CLUSTER);
+ Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE)));
+
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .build();
+
+ Collection<MetricData> allMetricData = metricReader.collectAllMetrics();
+
+ MetricData serverLatenciesMetricData = getMetricData(allMetricData, SERVER_LATENCIES_NAME);
+
+ long serverLatencies = getAggregatedValue(serverLatenciesMetricData, expectedAttributes);
+ assertThat(serverLatencies).isEqualTo(FAKE_SERVER_TIMING);
+
+ MetricData connectivityErrorCountMetricData =
+ getMetricData(allMetricData, CONNECTIVITY_ERROR_COUNT_NAME);
+ Attributes expected1 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "UNAVAILABLE")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, "global")
+ .put(CLUSTER_ID_KEY, "unspecified")
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
+ Attributes expected2 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
+
+ verifyAttributes(connectivityErrorCountMetricData, expected1);
+ verifyAttributes(connectivityErrorCountMetricData, expected2);
+
+ assertThat(getAggregatedValue(connectivityErrorCountMetricData, expected1)).isEqualTo(1);
+ assertThat(getAggregatedValue(connectivityErrorCountMetricData, expected2)).isEqualTo(0);
}
@Test
public void testReadRowsApplicationLatencyWithAutoFlowControl() throws Exception {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
-
- ArgumentCaptor applicationLatency = ArgumentCaptor.forClass(Long.class);
- ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class);
-
final SettableApiFuture future = SettableApiFuture.create();
final AtomicInteger counter = new AtomicInteger(0);
// For auto flow control, application latency is the time application spent in onResponse.
stub.readRowsCallable()
.call(
- Query.create(TABLE_ID),
+ Query.create(TABLE),
new ResponseObserver() {
@Override
public void onStart(StreamController streamController) {}
@@ -383,37 +418,38 @@ public void onComplete() {
});
future.get();
- verify(statsRecorderWrapper).putApplicationLatencies(applicationLatency.capture());
- verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture());
- verify(statsRecorderWrapper)
- .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
-
assertThat(counter.get()).isEqualTo(fakeService.getResponseCounter().get());
- // Thread.sleep might not sleep for the requested amount depending on the interrupt period
- // defined by the OS.
- // On linux this is ~1ms but on windows may be as high as 15-20ms.
- assertThat(applicationLatency.getValue())
- .isAtLeast((APPLICATION_LATENCY - SLEEP_VARIABILITY) * counter.get());
- assertThat(applicationLatency.getValue())
- .isAtMost(operationLatency.getValue() - SERVER_LATENCY);
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData applicationLatency =
+ getMetricData(allMetricData, APPLICATION_BLOCKING_LATENCIES_NAME);
+
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .build();
+ long value = getAggregatedValue(applicationLatency, expectedAttributes);
+
+ assertThat(value).isAtLeast((APPLICATION_LATENCY - SLEEP_VARIABILITY) * counter.get());
+
+ MetricData operationLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME);
+ long operationLatencyValue =
+ getAggregatedValue(
+ operationLatency,
+ expectedAttributes.toBuilder().put(STATUS_KEY, "OK").put(STREAMING_KEY, true).build());
+ assertThat(value).isAtMost(operationLatencyValue - SERVER_LATENCY);
}
@Test
public void testReadRowsApplicationLatencyWithManualFlowControl() throws Exception {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
-
- ArgumentCaptor applicationLatency = ArgumentCaptor.forClass(Long.class);
- ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class);
int counter = 0;
- Iterator rows = stub.readRowsCallable().call(Query.create(TABLE_ID)).iterator();
+ Iterator rows = stub.readRowsCallable().call(Query.create(TABLE)).iterator();
while (rows.hasNext()) {
counter++;
@@ -421,148 +457,189 @@ public void testReadRowsApplicationLatencyWithManualFlowControl() throws Excepti
rows.next();
}
- verify(statsRecorderWrapper).putApplicationLatencies(applicationLatency.capture());
- verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture());
- verify(statsRecorderWrapper)
- .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData applicationLatency =
+ getMetricData(allMetricData, APPLICATION_BLOCKING_LATENCIES_NAME);
+
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .build();
- // For manual flow control, the last application latency shouldn't count, because at that point
- // the server already sent back all the responses.
+ long value = getAggregatedValue(applicationLatency, expectedAttributes);
+ // For manual flow control, the last application latency shouldn't count, because at that
+ // point the server already sent back all the responses.
assertThat(counter).isEqualTo(fakeService.getResponseCounter().get());
- assertThat(applicationLatency.getValue())
- .isAtLeast(APPLICATION_LATENCY * (counter - 1) - SERVER_LATENCY);
- assertThat(applicationLatency.getValue())
- .isAtMost(operationLatency.getValue() - SERVER_LATENCY);
+ assertThat(value).isAtLeast(APPLICATION_LATENCY * (counter - 1) - SERVER_LATENCY);
+
+ MetricData operationLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME);
+ long operationLatencyValue =
+ getAggregatedValue(
+ operationLatency,
+ expectedAttributes.toBuilder().put(STATUS_KEY, "OK").put(STREAMING_KEY, true).build());
+ assertThat(value).isAtMost(operationLatencyValue - SERVER_LATENCY);
}
@Test
- public void testRetryCount() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenAnswer(
- (Answer)
- invocationOnMock ->
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "MutateRow"),
- statsRecorderWrapper));
-
- ArgumentCaptor retryCount = ArgumentCaptor.forClass(Integer.class);
-
+ public void testRetryCount() throws InterruptedException {
stub.mutateRowCallable()
- .call(RowMutation.create(TABLE_ID, "random-row").setCell("cf", "q", "value"));
-
- // In TracedUnaryCallable, we create a future and add a TraceFinisher to the callback. Main
- // thread is blocked on waiting for the future to be completed. When onComplete is called on
- // the grpc thread, the future is completed, however we might not have enough time for
- // TraceFinisher to run. Add a 1 second time out to wait for the callback. This shouldn't have
- // any impact on production code.
- verify(statsRecorderWrapper, timeout(1000)).putRetryCount(retryCount.capture());
+ .call(RowMutation.create(TABLE, "random-row").setCell("cf", "q", "value"));
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = getMetricData(allMetricData, RETRY_COUNT_NAME);
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(METHOD_KEY, "Bigtable.MutateRow")
+ .put(STATUS_KEY, "OK")
+ .build();
- assertThat(retryCount.getValue()).isEqualTo(fakeService.getAttemptCounter().get() - 1);
+ long value = getAggregatedValue(metricData, expectedAttributes);
+ assertThat(value).isEqualTo(fakeService.getAttemptCounter().get() - 1);
}
@Test
public void testMutateRowAttemptsTagValues() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.Unary, SpanName.of("Bigtable", "MutateRow"), statsRecorderWrapper));
-
stub.mutateRowCallable()
- .call(RowMutation.create(TABLE_ID, "random-row").setCell("cf", "q", "value"));
-
- // Set a timeout to reduce flakiness of this test. BasicRetryingFuture will set
- // attempt succeeded and set the response which will call complete() in AbstractFuture which
- // calls releaseWaiters(). onOperationComplete() is called in TracerFinisher which will be
- // called after the mutateRow call is returned. So there's a race between when the call returns
- // and when the record() is called in onOperationCompletion().
- verify(statsRecorderWrapper, timeout(50).times(fakeService.getAttemptCounter().get()))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
- assertThat(zone.getAllValues()).containsExactly("global", "global", ZONE);
- assertThat(cluster.getAllValues()).containsExactly("unspecified", "unspecified", CLUSTER);
- assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "UNAVAILABLE", "OK");
- assertThat(tableId.getAllValues()).containsExactly(TABLE_ID, TABLE_ID, TABLE_ID);
+ .call(RowMutation.create(TABLE, "random-row").setCell("cf", "q", "value"));
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME);
+
+ Attributes expected1 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "UNAVAILABLE")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, "global")
+ .put(CLUSTER_ID_KEY, "unspecified")
+ .put(METHOD_KEY, "Bigtable.MutateRow")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, false)
+ .build();
+
+ Attributes expected2 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.MutateRow")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, false)
+ .build();
+
+ verifyAttributes(metricData, expected1);
+ verifyAttributes(metricData, expected2);
}
@Test
public void testMutateRowsPartialError() throws InterruptedException {
+ Batcher batcher = stub.newMutateRowsBatcher(TableId.of(TABLE), null);
int numMutations = 6;
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper));
-
- Batcher batcher = stub.newMutateRowsBatcher(TABLE_ID, null);
for (int i = 0; i < numMutations; i++) {
String key = i % 2 == 0 ? "key" : "fail-key";
batcher.add(RowMutationEntry.create(key).setCell("f", "q", "v"));
}
- assertThrows(BatchingException.class, () -> batcher.close());
-
- int expectedNumRequests = numMutations / batchElementCount;
- verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Assert.assertThrows(BatchingException.class, batcher::close);
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME);
+
+ Attributes expected =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.MutateRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, false)
+ .build();
- assertThat(zone.getAllValues()).containsExactly(ZONE, ZONE, ZONE);
- assertThat(cluster.getAllValues()).containsExactly(CLUSTER, CLUSTER, CLUSTER);
- assertThat(status.getAllValues()).containsExactly("OK", "OK", "OK");
+ verifyAttributes(metricData, expected);
}
@Test
public void testMutateRowsRpcError() {
+ Batcher batcher =
+ stub.newMutateRowsBatcher(TableId.of(BAD_TABLE_ID), null);
int numMutations = 6;
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper));
-
- Batcher batcher = stub.newMutateRowsBatcher(BAD_TABLE_ID, null);
for (int i = 0; i < numMutations; i++) {
- batcher.add(RowMutationEntry.create("key").setCell("f", "q", "v"));
+ String key = i % 2 == 0 ? "key" : "fail-key";
+ batcher.add(RowMutationEntry.create(key).setCell("f", "q", "v"));
}
- assertThrows(BatchingException.class, () -> batcher.close());
-
- int expectedNumRequests = numMutations / batchElementCount;
- verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Assert.assertThrows(BatchingException.class, batcher::close);
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME);
+
+ Attributes expected =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "NOT_FOUND")
+ .put(TABLE_ID_KEY, BAD_TABLE_ID)
+ .put(ZONE_ID_KEY, "global")
+ .put(CLUSTER_ID_KEY, "unspecified")
+ .put(METHOD_KEY, "Bigtable.MutateRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, false)
+ .build();
- assertThat(zone.getAllValues()).containsExactly("global", "global", "global");
- assertThat(cluster.getAllValues()).containsExactly("unspecified", "unspecified", "unspecified");
- assertThat(status.getAllValues()).containsExactly("NOT_FOUND", "NOT_FOUND", "NOT_FOUND");
+ verifyAttributes(metricData, expected);
}
@Test
public void testReadRowsAttemptsTagValues() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
-
Lists.newArrayList(stub.readRowsCallable().call(Query.create("fake-table")).iterator());
- // Set a timeout to reduce flakiness of this test. BasicRetryingFuture will set
- // attempt succeeded and set the response which will call complete() in AbstractFuture which
- // calls releaseWaiters(). onOperationComplete() is called in TracerFinisher which will be
- // called after the mutateRow call is returned. So there's a race between when the call returns
- // and when the record() is called in onOperationCompletion().
- verify(statsRecorderWrapper, timeout(50).times(fakeService.getAttemptCounter().get()))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
- assertThat(zone.getAllValues()).containsExactly("global", ZONE);
- assertThat(cluster.getAllValues()).containsExactly("unspecified", CLUSTER);
- assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "OK");
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME);
+
+ Attributes expected1 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "UNAVAILABLE")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, "global")
+ .put(CLUSTER_ID_KEY, "unspecified")
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, true)
+ .build();
+
+ Attributes expected2 =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "OK")
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .put(STREAMING_KEY, true)
+ .build();
+
+ verifyAttributes(metricData, expected1);
+ verifyAttributes(metricData, expected2);
}
@Test
public void testBatchBlockingLatencies() throws InterruptedException {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper));
- try (Batcher batcher = stub.newMutateRowsBatcher(TABLE_ID, null)) {
+ try (Batcher batcher = stub.newMutateRowsBatcher(TABLE, null)) {
for (int i = 0; i < 6; i++) {
batcher.add(RowMutationEntry.create("key").setCell("f", "q", "v"));
}
@@ -571,86 +648,100 @@ public void testBatchBlockingLatencies() throws InterruptedException {
batcher.close();
int expectedNumRequests = 6 / batchElementCount;
- ArgumentCaptor throttledTime = ArgumentCaptor.forClass(Long.class);
- verify(statsRecorderWrapper, timeout(1000).times(expectedNumRequests))
- .putClientBlockingLatencies(throttledTime.capture());
- // After the first request is sent, batcher will block on add because of the server latency.
- // Blocking latency should be around server latency.
- assertThat(throttledTime.getAllValues().get(1)).isAtLeast(SERVER_LATENCY - 10);
- assertThat(throttledTime.getAllValues().get(2)).isAtLeast(SERVER_LATENCY - 10);
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData applicationLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME);
- verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ Attributes expectedAttributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(METHOD_KEY, "Bigtable.MutateRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
- assertThat(zone.getAllValues()).containsExactly(ZONE, ZONE, ZONE);
- assertThat(cluster.getAllValues()).containsExactly(CLUSTER, CLUSTER, CLUSTER);
+ long value = getAggregatedValue(applicationLatency, expectedAttributes);
+ // After the first request is sent, batcher will block on add because of the server latency.
+ // Blocking latency should be around server latency. So each data point would be at least
+ // (SERVER_LATENCY - 10).
+ long expected = (SERVER_LATENCY - 10) * (expectedNumRequests - 1) / expectedNumRequests;
+ assertThat(value).isAtLeast(expected);
}
}
@Test
- public void testQueuedOnChannelServerStreamLatencies() throws InterruptedException {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
-
- stub.readRowsCallable().all().call(Query.create(TABLE_ID));
-
- ArgumentCaptor blockedTime = ArgumentCaptor.forClass(Long.class);
-
- verify(statsRecorderWrapper, timeout(1000).times(fakeService.attemptCounter.get()))
- .putClientBlockingLatencies(blockedTime.capture());
+ public void testQueuedOnChannelServerStreamLatencies() {
+ stub.readRowsCallable().all().call(Query.create(TABLE));
+
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData clientLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME);
+
+ Attributes attributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
- assertThat(blockedTime.getAllValues().get(1)).isAtLeast(CHANNEL_BLOCKING_LATENCY);
+ long value = getAggregatedValue(clientLatency, attributes);
+ assertThat(value).isAtLeast(CHANNEL_BLOCKING_LATENCY);
}
@Test
- public void testQueuedOnChannelUnaryLatencies() throws InterruptedException {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.Unary, SpanName.of("Bigtable", "MutateRow"), statsRecorderWrapper));
- stub.mutateRowCallable().call(RowMutation.create(TABLE_ID, "a-key").setCell("f", "q", "v"));
+ public void testQueuedOnChannelUnaryLatencies() {
- ArgumentCaptor blockedTime = ArgumentCaptor.forClass(Long.class);
+ stub.mutateRowCallable().call(RowMutation.create(TABLE, "a-key").setCell("f", "q", "v"));
- verify(statsRecorderWrapper, timeout(1000).times(fakeService.attemptCounter.get()))
- .putClientBlockingLatencies(blockedTime.capture());
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData clientLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME);
- assertThat(blockedTime.getAllValues().get(1)).isAtLeast(CHANNEL_BLOCKING_LATENCY);
- assertThat(blockedTime.getAllValues().get(2)).isAtLeast(CHANNEL_BLOCKING_LATENCY);
+ Attributes attributes =
+ baseAttributes
+ .toBuilder()
+ .put(TABLE_ID_KEY, TABLE)
+ .put(CLUSTER_ID_KEY, CLUSTER)
+ .put(ZONE_ID_KEY, ZONE)
+ .put(METHOD_KEY, "Bigtable.MutateRow")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
+
+ long expected = CHANNEL_BLOCKING_LATENCY * 2 / 3;
+ long actual = getAggregatedValue(clientLatency, attributes);
+ assertThat(actual).isAtLeast(expected);
}
@Test
public void testPermanentFailure() {
- when(mockFactory.newTracer(any(), any(), any()))
- .thenReturn(
- new BuiltinMetricsTracer(
- OperationType.ServerStreaming,
- SpanName.of("Bigtable", "ReadRows"),
- statsRecorderWrapper));
-
try {
Lists.newArrayList(stub.readRowsCallable().call(Query.create(BAD_TABLE_ID)).iterator());
Assert.fail("Request should throw not found error");
} catch (NotFoundException e) {
}
- ArgumentCaptor attemptLatency = ArgumentCaptor.forClass(Long.class);
- ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class);
+ Collection allMetricData = metricReader.collectAllMetrics();
+ MetricData attemptLatency = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME);
+
+ Attributes expected =
+ baseAttributes
+ .toBuilder()
+ .put(STATUS_KEY, "NOT_FOUND")
+ .put(TABLE_ID_KEY, BAD_TABLE_ID)
+ .put(CLUSTER_ID_KEY, "unspecified")
+ .put(ZONE_ID_KEY, "global")
+ .put(STREAMING_KEY, true)
+ .put(METHOD_KEY, "Bigtable.ReadRows")
+ .put(CLIENT_NAME_KEY, CLIENT_NAME)
+ .build();
- verify(statsRecorderWrapper, timeout(50)).putAttemptLatencies(attemptLatency.capture());
- verify(statsRecorderWrapper, timeout(50)).putOperationLatencies(operationLatency.capture());
- verify(statsRecorderWrapper, timeout(50))
- .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture());
+ verifyAttributes(attemptLatency, expected);
- assertThat(status.getValue()).isEqualTo("NOT_FOUND");
- assertThat(tableId.getValue()).isEqualTo(BAD_TABLE_ID);
- assertThat(cluster.getValue()).isEqualTo("unspecified");
- assertThat(zone.getValue()).isEqualTo("global");
+ MetricData opLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME);
+ verifyAttributes(opLatency, expected);
}
private static class FakeService extends BigtableGrpc.BigtableImplBase {
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java
index a6670182b8..4ab19a5337 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java
@@ -23,17 +23,29 @@
import com.google.api.gax.grpc.ChannelPoolSettings;
import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider;
import com.google.bigtable.v2.*;
+import com.google.cloud.bigtable.Version;
import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
import com.google.cloud.bigtable.data.v2.FakeServiceBuilder;
import com.google.cloud.bigtable.data.v2.models.*;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
-import com.google.cloud.bigtable.stats.StatsRecorderWrapperForConnection;
import io.grpc.Server;
import io.grpc.Status;
import io.grpc.StatusRuntimeException;
import io.grpc.stub.StreamObserver;
+import io.opentelemetry.api.common.Attributes;
+import io.opentelemetry.sdk.OpenTelemetrySdk;
+import io.opentelemetry.sdk.metrics.InstrumentSelector;
+import io.opentelemetry.sdk.metrics.SdkMeterProvider;
+import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
+import io.opentelemetry.sdk.metrics.View;
+import io.opentelemetry.sdk.metrics.data.HistogramPointData;
+import io.opentelemetry.sdk.metrics.data.MetricData;
+import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
+import java.util.ArrayList;
+import java.util.Collection;
import java.util.List;
+import java.util.Map;
import java.util.concurrent.ScheduledExecutorService;
import org.junit.After;
import org.junit.Before;
@@ -51,25 +63,50 @@ public class ErrorCountPerConnectionTest {
private final FakeService fakeService = new FakeService();
private EnhancedBigtableStubSettings.Builder builder;
private ArgumentCaptor runnableCaptor;
- private StatsRecorderWrapperForConnection statsRecorderWrapperForConnection;
+
+ private InMemoryMetricReader metricReader;
+
+ private Attributes attributes;
@Before
public void setup() throws Exception {
server = FakeServiceBuilder.create(fakeService).start();
ScheduledExecutorService executors = Mockito.mock(ScheduledExecutorService.class);
+
+ attributes =
+ Attributes.builder()
+ .put(BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY, "fake-project")
+ .put(BuiltinMetricsConstants.INSTANCE_ID_KEY, "fake-instance")
+ .put(BuiltinMetricsConstants.APP_PROFILE_KEY, "")
+ .put(BuiltinMetricsConstants.CLIENT_NAME_KEY, "bigtable-java/" + Version.VERSION)
+ .build();
+
+ metricReader = InMemoryMetricReader.create();
+
+ SdkMeterProviderBuilder meterProvider =
+ SdkMeterProvider.builder().registerMetricReader(metricReader);
+
+ for (Map.Entry entry :
+ BuiltinMetricsConstants.getAllViews().entrySet()) {
+ meterProvider.registerView(entry.getKey(), entry.getValue());
+ }
+
+ OpenTelemetrySdk otel =
+ OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
+
builder =
BigtableDataSettings.newBuilderForEmulator(server.getPort())
.stubSettings()
.setBackgroundExecutorProvider(FixedExecutorProvider.create(executors))
.setProjectId("fake-project")
- .setInstanceId("fake-instance");
+ .setInstanceId("fake-instance")
+ .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(otel));
+
runnableCaptor = ArgumentCaptor.forClass(Runnable.class);
Mockito.when(
executors.scheduleAtFixedRate(runnableCaptor.capture(), anyLong(), anyLong(), any()))
.thenReturn(null);
-
- statsRecorderWrapperForConnection = Mockito.mock(StatsRecorderWrapperForConnection.class);
}
@After
@@ -98,14 +135,21 @@ public void readWithOneChannel() throws Exception {
// noop
}
}
- ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
+
runInterceptorTasksAndAssertCount();
- List allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts.size()).isEqualTo(1);
- assertThat(allErrorCounts.get(0)).isEqualTo(errorCount);
+
+ Collection allMetrics = metricReader.collectAllMetrics();
+ MetricData metricData =
+ BuiltinMetricsTestUtils.getMetricData(
+ allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME);
+
+ // Make sure the correct bucket is updated with the correct number of data points
+ ArrayList histogramPointData =
+ new ArrayList<>(metricData.getHistogramData().getPoints());
+ assertThat(histogramPointData.size()).isEqualTo(1);
+ HistogramPointData point = histogramPointData.get(0);
+ int index = findDataPointIndex(point.getBoundaries(), errorCount);
+ assertThat(point.getCounts().get(index)).isEqualTo(1);
}
@Test
@@ -131,28 +175,35 @@ public void readWithTwoChannels() throws Exception {
// noop
}
}
- ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
runInterceptorTasksAndAssertCount();
- List allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts.size()).isEqualTo(2);
- // Requests get assigned to channels using a Round Robin algorithm, so half to each.
- assertThat(allErrorCounts).containsExactly(totalErrorCount / 2, totalErrorCount / 2);
+ long errorCountPerChannel = totalErrorCount / 2;
+
+ Collection allMetrics = metricReader.collectAllMetrics();
+ MetricData metricData =
+ BuiltinMetricsTestUtils.getMetricData(
+ allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME);
+
+ // The 2 channels should get an equal number of errors, so the totalErrorCount / 2 bucket is
+ // updated twice.
+ ArrayList histogramPointData =
+ new ArrayList<>(metricData.getHistogramData().getPoints());
+ assertThat(histogramPointData.size()).isEqualTo(1);
+ HistogramPointData point = histogramPointData.get(0);
+ int index = findDataPointIndex(point.getBoundaries(), errorCountPerChannel);
+ assertThat(point.getCounts().get(index)).isEqualTo(2);
}
@Test
public void readOverTwoPeriods() throws Exception {
EnhancedBigtableStub stub = EnhancedBigtableStub.create(builder.build());
- long errorCount = 0;
+ long errorCount1 = 0;
for (int i = 0; i < 20; i++) {
Query query;
if (i % 3 == 0) {
query = Query.create(ERROR_TABLE_NAME);
- errorCount += 1;
+ errorCount1 += 1;
} else {
query = Query.create(SUCCESS_TABLE_NAME);
}
@@ -162,16 +213,9 @@ public void readOverTwoPeriods() throws Exception {
// noop
}
}
- ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
- runInterceptorTasksAndAssertCount();
- List allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts.size()).isEqualTo(1);
- assertThat(allErrorCounts.get(0)).isEqualTo(errorCount);
- errorCount = 0;
+ runInterceptorTasksAndAssertCount();
+ long errorCount2 = 0;
for (int i = 0; i < 20; i++) {
Query query;
@@ -179,7 +223,7 @@ public void readOverTwoPeriods() throws Exception {
query = Query.create(SUCCESS_TABLE_NAME);
} else {
query = Query.create(ERROR_TABLE_NAME);
- errorCount += 1;
+ errorCount2 += 1;
}
try {
stub.readRowsCallable().call(query).iterator().hasNext();
@@ -187,27 +231,22 @@ public void readOverTwoPeriods() throws Exception {
// noop
}
}
- errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
+
runInterceptorTasksAndAssertCount();
- allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts.size()).isEqualTo(1);
- assertThat(allErrorCounts.get(0)).isEqualTo(errorCount);
- }
- @Test
- public void ignoreInactiveConnection() throws Exception {
- EnhancedBigtableStub stub = EnhancedBigtableStub.create(builder.build());
+ Collection allMetrics = metricReader.collectAllMetrics();
+ MetricData metricData =
+ BuiltinMetricsTestUtils.getMetricData(
+ allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME);
- ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
- runInterceptorTasksAndAssertCount();
- List allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts).isEmpty();
+ ArrayList histogramPointData =
+ new ArrayList<>(metricData.getHistogramData().getPoints());
+ assertThat(histogramPointData.size()).isEqualTo(1);
+ HistogramPointData point = histogramPointData.get(0);
+ int index1 = findDataPointIndex(point.getBoundaries(), errorCount1);
+ int index2 = findDataPointIndex(point.getBoundaries(), errorCount2);
+ assertThat(point.getCounts().get(index1)).isEqualTo(1);
+ assertThat(point.getCounts().get(index2)).isEqualTo(1);
}
@Test
@@ -221,22 +260,19 @@ public void noFailedRequests() throws Exception {
// noop
}
}
- ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class);
- Mockito.doNothing()
- .when(statsRecorderWrapperForConnection)
- .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture());
runInterceptorTasksAndAssertCount();
- List allErrorCounts = errorCountCaptor.getAllValues();
- assertThat(allErrorCounts.size()).isEqualTo(1);
- assertThat(allErrorCounts.get(0)).isEqualTo(0);
+ Collection allMetrics = metricReader.collectAllMetrics();
+ MetricData metricData =
+ BuiltinMetricsTestUtils.getMetricData(
+ allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME);
+ long value = BuiltinMetricsTestUtils.getAggregatedValue(metricData, attributes);
+ assertThat(value).isEqualTo(0);
}
private void runInterceptorTasksAndAssertCount() {
int actualNumOfTasks = 0;
for (Runnable runnable : runnableCaptor.getAllValues()) {
if (runnable instanceof ErrorCountPerConnectionMetricTracker) {
- ((ErrorCountPerConnectionMetricTracker) runnable)
- .setStatsRecorderWrapperForConnection(statsRecorderWrapperForConnection);
runnable.run();
actualNumOfTasks++;
}
@@ -244,6 +280,16 @@ private void runInterceptorTasksAndAssertCount() {
assertThat(actualNumOfTasks).isEqualTo(1);
}
+ private int findDataPointIndex(List boundaries, long dataPoint) {
+ int index = 0;
+ for (; index < boundaries.size(); index++) {
+ if (boundaries.get(index) >= dataPoint) {
+ break;
+ }
+ }
+ return index;
+ }
+
static class FakeService extends BigtableGrpc.BigtableImplBase {
@Override
public void readRows(
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java
index 15bd9171f0..d72eac4056 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java
@@ -39,7 +39,6 @@
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;
import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
-import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor;
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableMap;
@@ -120,15 +119,20 @@ public void setUp() throws Exception {
.setInstanceId(INSTANCE_ID)
.setAppProfileId(APP_PROFILE_ID)
.build();
- EnhancedBigtableStubSettings stubSettings =
- settings
- .getStubSettings()
+
+ ClientContext clientContext =
+ EnhancedBigtableStub.createClientContext(settings.getStubSettings());
+ clientContext =
+ clientContext
.toBuilder()
.setTracerFactory(
EnhancedBigtableStub.createBigtableTracerFactory(
- settings.getStubSettings(), Tags.getTagger(), localStats.getStatsRecorder()))
+ settings.getStubSettings(),
+ Tags.getTagger(),
+ localStats.getStatsRecorder(),
+ null))
.build();
- stub = new EnhancedBigtableStub(stubSettings, ClientContext.create(stubSettings));
+ stub = new EnhancedBigtableStub(settings.getStubSettings(), clientContext);
}
@After
diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/test_helpers/env/TestEnvRule.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/test_helpers/env/TestEnvRule.java
index d4470637af..3b2ebb151c 100644
--- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/test_helpers/env/TestEnvRule.java
+++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/test_helpers/env/TestEnvRule.java
@@ -24,6 +24,7 @@
import com.google.cloud.bigtable.admin.v2.models.AppProfile;
import com.google.cloud.bigtable.admin.v2.models.Cluster;
import com.google.cloud.bigtable.admin.v2.models.Instance;
+import com.google.cloud.bigtable.admin.v2.models.UpdateAuthorizedViewRequest;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.base.Strings;
@@ -203,6 +204,7 @@ private void cleanupStaleTables(String stalePrefix) {
continue;
}
if (stalePrefix.compareTo(tableId) > 0) {
+ prepTableForDelete(tableId);
try {
env().getTableAdminClient().deleteTable(tableId);
} catch (NotFoundException ignored) {
@@ -212,6 +214,21 @@ private void cleanupStaleTables(String stalePrefix) {
}
}
+ private void prepTableForDelete(String tableId) {
+ // Unprotected views
+ if (!(env() instanceof EmulatorEnv)) {
+ for (String viewId : env().getTableAdminClient().listAuthorizedViews(tableId)) {
+ try {
+ env()
+ .getTableAdminClient()
+ .updateAuthorizedView(
+ UpdateAuthorizedViewRequest.of(tableId, viewId).setDeletionProtection(false));
+ } catch (NotFoundException ignored) {
+ }
+ }
+ }
+ }
+
/**
* Clean up AppProfile that were dynamically created in the default instance that have been
* orphaned.
diff --git a/grpc-google-cloud-bigtable-admin-v2/pom.xml b/grpc-google-cloud-bigtable-admin-v2/pom.xml
index 4606ec8b84..c356f47fed 100644
--- a/grpc-google-cloud-bigtable-admin-v2/pom.xml
+++ b/grpc-google-cloud-bigtable-admin-v2/pom.xml
@@ -4,13 +4,13 @@
4.0.0
com.google.api.grpc
grpc-google-cloud-bigtable-admin-v2
- 2.37.0
+ 2.38.0
grpc-google-cloud-bigtable-admin-v2
GRPC library for grpc-google-cloud-bigtable-admin-v2
com.google.cloud
google-cloud-bigtable-parent
- 2.37.0
+ 2.38.0
@@ -18,14 +18,14 @@
com.google.cloud
google-cloud-bigtable-deps-bom
- 2.37.0
+ 2.38.0
pom
import
com.google.cloud
google-cloud-bigtable-bom
- 2.37.0
+ 2.38.0
pom
import
diff --git a/grpc-google-cloud-bigtable-v2/pom.xml b/grpc-google-cloud-bigtable-v2/pom.xml
index 3e89608789..297f0c1c37 100644
--- a/grpc-google-cloud-bigtable-v2/pom.xml
+++ b/grpc-google-cloud-bigtable-v2/pom.xml
@@ -4,13 +4,13 @@
4.0.0
com.google.api.grpc
grpc-google-cloud-bigtable-v2
- 2.37.0
+ 2.38.0
grpc-google-cloud-bigtable-v2
GRPC library for grpc-google-cloud-bigtable-v2
com.google.cloud
google-cloud-bigtable-parent
- 2.37.0
+ 2.38.0
@@ -18,14 +18,14 @@
com.google.cloud
google-cloud-bigtable-deps-bom
- 2.37.0
+ 2.38.0
pom
import
com.google.cloud
google-cloud-bigtable-bom
- 2.37.0
+ 2.38.0
pom
import
diff --git a/pom.xml b/pom.xml
index ffa93e753e..a94d96edbf 100644
--- a/pom.xml
+++ b/pom.xml
@@ -4,7 +4,7 @@
google-cloud-bigtable-parent
pom
- 2.37.0
+ 2.38.0
Google Cloud Bigtable Parent
https://github.com/googleapis/java-bigtable
@@ -153,27 +153,27 @@
com.google.api.grpc
proto-google-cloud-bigtable-v2
- 2.37.0
+ 2.38.0
com.google.api.grpc
proto-google-cloud-bigtable-admin-v2
- 2.37.0
+ 2.38.0
com.google.api.grpc
grpc-google-cloud-bigtable-v2
- 2.37.0
+ 2.38.0
com.google.api.grpc
grpc-google-cloud-bigtable-admin-v2
- 2.37.0
+ 2.38.0
com.google.cloud
google-cloud-bigtable
- 2.37.0
+ 2.38.0
@@ -347,22 +347,6 @@
-
-
-
- with-shaded
-
-
- !skip-shaded
-
-
-
- google-cloud-bigtable-stats
-
-
diff --git a/proto-google-cloud-bigtable-admin-v2/pom.xml b/proto-google-cloud-bigtable-admin-v2/pom.xml
index c06610f581..8321a0b6a0 100644
--- a/proto-google-cloud-bigtable-admin-v2/pom.xml
+++ b/proto-google-cloud-bigtable-admin-v2/pom.xml
@@ -4,13 +4,13 @@
4.0.0
com.google.api.grpc
proto-google-cloud-bigtable-admin-v2
- 2.37.0
+ 2.38.0
proto-google-cloud-bigtable-admin-v2
PROTO library for proto-google-cloud-bigtable-admin-v2
com.google.cloud
google-cloud-bigtable-parent
- 2.37.0
+ 2.38.0
@@ -18,14 +18,14 @@
com.google.cloud
google-cloud-bigtable-deps-bom
- 2.37.0
+ 2.38.0
pom
import
com.google.cloud
google-cloud-bigtable-bom
- 2.37.0
+ 2.38.0
pom
import
diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfile.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfile.java
index e9fbfaa569..e96392a2f0 100644
--- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfile.java
+++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfile.java
@@ -2382,6 +2382,883 @@ public com.google.bigtable.admin.v2.AppProfile.StandardIsolation getDefaultInsta
}
}
+ public interface DataBoostIsolationReadOnlyOrBuilder
+ extends
+ // @@protoc_insertion_point(interface_extends:google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly)
+ com.google.protobuf.MessageOrBuilder {
+
+ /**
+ *
+ *
+ *
+ * The Compute Billing Owner for this Data Boost App Profile.
+ *
+ *
+ *
+ * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1;
+ *
+ *
+ * @return Whether the computeBillingOwner field is set.
+ */
+ boolean hasComputeBillingOwner();
+ /**
+ *
+ *
+ *
+ * The Compute Billing Owner for this Data Boost App Profile.
+ *
+ *
+ *
+ * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1;
+ *
+ *
+ * @return The enum numeric value on the wire for computeBillingOwner.
+ */
+ int getComputeBillingOwnerValue();
+ /**
+ *
+ *
+ *
+ * The Compute Billing Owner for this Data Boost App Profile.
+ *
+ *
+ *
+ * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1;
+ *
+ *
+ * @return The computeBillingOwner.
+ */
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
+ getComputeBillingOwner();
+ }
+ /**
+ *
+ *
+ *
+ * Data Boost is a serverless compute capability that lets you run
+ * high-throughput read jobs on your Bigtable data, without impacting the
+ * performance of the clusters that handle your application traffic.
+ * Currently, Data Boost exclusively supports read-only use-cases with
+ * single-cluster routing.
+ *
+ * Data Boost reads are only guaranteed to see the results of writes that
+ * were written at least 30 minutes ago. This means newly written values may
+ * not become visible for up to 30m, and also means that old values may
+ * remain visible for up to 30m after being deleted or overwritten. To
+ * mitigate the staleness of the data, users may either wait 30m, or use
+ * CheckConsistency.
+ *
+ *
+ * Protobuf type {@code google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly}
+ */
+ public static final class DataBoostIsolationReadOnly
+ extends com.google.protobuf.GeneratedMessageV3
+ implements
+ // @@protoc_insertion_point(message_implements:google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly)
+ DataBoostIsolationReadOnlyOrBuilder {
+ private static final long serialVersionUID = 0L;
+ // Use DataBoostIsolationReadOnly.newBuilder() to construct.
+ private DataBoostIsolationReadOnly(com.google.protobuf.GeneratedMessageV3.Builder> builder) {
+ super(builder);
+ }
+
+ private DataBoostIsolationReadOnly() {
+ computeBillingOwner_ = 0;
+ }
+
+ @java.lang.Override
+ @SuppressWarnings({"unused"})
+ protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+ return new DataBoostIsolationReadOnly();
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.bigtable.admin.v2.InstanceProto
+ .internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.bigtable.admin.v2.InstanceProto
+ .internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.class,
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.Builder.class);
+ }
+
+ /**
+ *
+ *
+ *
+ * Compute Billing Owner specifies how usage should be accounted when using
+ * Data Boost. Compute Billing Owner also configures which Cloud Project is
+ * charged for relevant quota.
+ *
+ *
+ * Protobuf enum {@code
+ * google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner}
+ */
+ public enum ComputeBillingOwner implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ *
+ *
+ *
+ * Unspecified value.
+ *
+ *
+ * COMPUTE_BILLING_OWNER_UNSPECIFIED = 0;
+ */
+ COMPUTE_BILLING_OWNER_UNSPECIFIED(0),
+ /**
+ *
+ *
+ *
+ * The host Cloud Project containing the targeted Bigtable Instance /
+ * Table pays for compute.
+ *
+ *
+ * HOST_PAYS = 1;
+ */
+ HOST_PAYS(1),
+ UNRECOGNIZED(-1),
+ ;
+
+ /**
+ *
+ *
+ *
+ * Unspecified value.
+ *
+ *
+ * COMPUTE_BILLING_OWNER_UNSPECIFIED = 0;
+ */
+ public static final int COMPUTE_BILLING_OWNER_UNSPECIFIED_VALUE = 0;
+ /**
+ *
+ *
+ *
+ * The host Cloud Project containing the targeted Bigtable Instance /
+ * Table pays for compute.
+ *
+ *
+ * HOST_PAYS = 1;
+ */
+ public static final int HOST_PAYS_VALUE = 1;
+
+ public final int getNumber() {
+ if (this == UNRECOGNIZED) {
+ throw new java.lang.IllegalArgumentException(
+ "Can't get the number of an unknown enum value.");
+ }
+ return value;
+ }
+
+ /**
+ * @param value The numeric wire value of the corresponding enum entry.
+ * @return The enum associated with the given numeric wire value.
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+ @java.lang.Deprecated
+ public static ComputeBillingOwner valueOf(int value) {
+ return forNumber(value);
+ }
+
+ /**
+ * @param value The numeric wire value of the corresponding enum entry.
+ * @return The enum associated with the given numeric wire value.
+ */
+ public static ComputeBillingOwner forNumber(int value) {
+ switch (value) {
+ case 0:
+ return COMPUTE_BILLING_OWNER_UNSPECIFIED;
+ case 1:
+ return HOST_PAYS;
+ default:
+ return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap
+ internalGetValueMap() {
+ return internalValueMap;
+ }
+
+ private static final com.google.protobuf.Internal.EnumLiteMap
+ internalValueMap =
+ new com.google.protobuf.Internal.EnumLiteMap() {
+ public ComputeBillingOwner findValueByNumber(int number) {
+ return ComputeBillingOwner.forNumber(number);
+ }
+ };
+
+ public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() {
+ if (this == UNRECOGNIZED) {
+ throw new java.lang.IllegalStateException(
+ "Can't get the descriptor of an unrecognized enum value.");
+ }
+ return getDescriptor().getValues().get(ordinal());
+ }
+
+ public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() {
+ return getDescriptor();
+ }
+
+ public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() {
+ return com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.getDescriptor()
+ .getEnumTypes()
+ .get(0);
+ }
+
+ private static final ComputeBillingOwner[] VALUES = values();
+
+ public static ComputeBillingOwner valueOf(
+ com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
+ if (desc.getType() != getDescriptor()) {
+ throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type.");
+ }
+ if (desc.getIndex() == -1) {
+ return UNRECOGNIZED;
+ }
+ return VALUES[desc.getIndex()];
+ }
+
+ private final int value;
+
+ private ComputeBillingOwner(int value) {
+ this.value = value;
+ }
+
+ // @@protoc_insertion_point(enum_scope:google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner)
+ }
+
+ private int bitField0_;
+ public static final int COMPUTE_BILLING_OWNER_FIELD_NUMBER = 1;
+ private int computeBillingOwner_ = 0;
+ /**
+ *
+ *
+ *
+ * The Compute Billing Owner for this Data Boost App Profile.
+ *
+ *
+ *
+ * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1;
+ *
+ *
+ * @return Whether the computeBillingOwner field is set.
+ */
+ @java.lang.Override
+ public boolean hasComputeBillingOwner() {
+ return ((bitField0_ & 0x00000001) != 0);
+ }
+ /**
+ *
+ *
+ *
+ * The Compute Billing Owner for this Data Boost App Profile.
+ *
+ *
+ *
+ * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1;
+ *
+ *
+ * @return The enum numeric value on the wire for computeBillingOwner.
+ */
+ @java.lang.Override
+ public int getComputeBillingOwnerValue() {
+ return computeBillingOwner_;
+ }
+ /**
+ *
+ *
+ *
+ * The Compute Billing Owner for this Data Boost App Profile.
+ *
+ *
+ *
+ * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1;
+ *
+ *
+ * @return The computeBillingOwner.
+ */
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
+ getComputeBillingOwner() {
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
+ result =
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
+ .forNumber(computeBillingOwner_);
+ return result == null
+ ? com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
+ .UNRECOGNIZED
+ : result;
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ if (((bitField0_ & 0x00000001) != 0)) {
+ output.writeEnum(1, computeBillingOwner_);
+ }
+ getUnknownFields().writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ if (((bitField0_ & 0x00000001) != 0)) {
+ size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, computeBillingOwner_);
+ }
+ size += getUnknownFields().getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly)) {
+ return super.equals(obj);
+ }
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly other =
+ (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) obj;
+
+ if (hasComputeBillingOwner() != other.hasComputeBillingOwner()) return false;
+ if (hasComputeBillingOwner()) {
+ if (computeBillingOwner_ != other.computeBillingOwner_) return false;
+ }
+ if (!getUnknownFields().equals(other.getUnknownFields())) return false;
+ return true;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ if (hasComputeBillingOwner()) {
+ hash = (37 * hash) + COMPUTE_BILLING_OWNER_FIELD_NUMBER;
+ hash = (53 * hash) + computeBillingOwner_;
+ }
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom(
+ byte[] data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(
+ com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ *
+ * Data Boost is a serverless compute capability that lets you run
+ * high-throughput read jobs on your Bigtable data, without impacting the
+ * performance of the clusters that handle your application traffic.
+ * Currently, Data Boost exclusively supports read-only use-cases with
+ * single-cluster routing.
+ *
+ * Data Boost reads are only guaranteed to see the results of writes that
+ * were written at least 30 minutes ago. This means newly written values may
+ * not become visible for up to 30m, and also means that old values may
+ * remain visible for up to 30m after being deleted or overwritten. To
+ * mitigate the staleness of the data, users may either wait 30m, or use
+ * CheckConsistency.
+ *
+ *
+ * Protobuf type {@code google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly}
+ */
+ public static final class Builder
+ extends com.google.protobuf.GeneratedMessageV3.Builder
+ implements
+ // @@protoc_insertion_point(builder_implements:google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly)
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnlyOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.bigtable.admin.v2.InstanceProto
+ .internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.bigtable.admin.v2.InstanceProto
+ .internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.class,
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.Builder.class);
+ }
+
+ // Construct using
+ // com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.newBuilder()
+ private Builder() {}
+
+ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ }
+
+ @java.lang.Override
+ public Builder clear() {
+ super.clear();
+ bitField0_ = 0;
+ computeBillingOwner_ = 0;
+ return this;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+ return com.google.bigtable.admin.v2.InstanceProto
+ .internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_descriptor;
+ }
+
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ getDefaultInstanceForType() {
+ return com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ .getDefaultInstance();
+ }
+
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly build() {
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly buildPartial() {
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly result =
+ new com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly(this);
+ if (bitField0_ != 0) {
+ buildPartial0(result);
+ }
+ onBuilt();
+ return result;
+ }
+
+ private void buildPartial0(
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly result) {
+ int from_bitField0_ = bitField0_;
+ int to_bitField0_ = 0;
+ if (((from_bitField0_ & 0x00000001) != 0)) {
+ result.computeBillingOwner_ = computeBillingOwner_;
+ to_bitField0_ |= 0x00000001;
+ }
+ result.bitField0_ |= to_bitField0_;
+ }
+
+ @java.lang.Override
+ public Builder clone() {
+ return super.clone();
+ }
+
+ @java.lang.Override
+ public Builder setField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.setField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return super.clearField(field);
+ }
+
+ @java.lang.Override
+ public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return super.clearOneof(oneof);
+ }
+
+ @java.lang.Override
+ public Builder setRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field,
+ int index,
+ java.lang.Object value) {
+ return super.setRepeatedField(field, index, value);
+ }
+
+ @java.lang.Override
+ public Builder addRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.addRepeatedField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) {
+ return mergeFrom(
+ (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly other) {
+ if (other
+ == com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ .getDefaultInstance()) return this;
+ if (other.hasComputeBillingOwner()) {
+ setComputeBillingOwner(other.getComputeBillingOwner());
+ }
+ this.mergeUnknownFields(other.getUnknownFields());
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ case 8:
+ {
+ computeBillingOwner_ = input.readEnum();
+ bitField0_ |= 0x00000001;
+ break;
+ } // case 8
+ default:
+ {
+ if (!super.parseUnknownField(input, extensionRegistry, tag)) {
+ done = true; // was an endgroup tag
+ }
+ break;
+ } // default:
+ } // switch (tag)
+ } // while (!done)
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.unwrapIOException();
+ } finally {
+ onChanged();
+ } // finally
+ return this;
+ }
+
+ private int bitField0_;
+
+ private int computeBillingOwner_ = 0;
+ /**
+ *
+ *
+ *
+ * The Compute Billing Owner for this Data Boost App Profile.
+ *
+ *
+ *
+ * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1;
+ *
+ *
+ * @return Whether the computeBillingOwner field is set.
+ */
+ @java.lang.Override
+ public boolean hasComputeBillingOwner() {
+ return ((bitField0_ & 0x00000001) != 0);
+ }
+ /**
+ *
+ *
+ *
+ * The Compute Billing Owner for this Data Boost App Profile.
+ *
+ *
+ *
+ * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1;
+ *
+ *
+ * @return The enum numeric value on the wire for computeBillingOwner.
+ */
+ @java.lang.Override
+ public int getComputeBillingOwnerValue() {
+ return computeBillingOwner_;
+ }
+ /**
+ *
+ *
+ *
+ * The Compute Billing Owner for this Data Boost App Profile.
+ *
+ *
+ *
+ * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1;
+ *
+ *
+ * @param value The enum numeric value on the wire for computeBillingOwner to set.
+ * @return This builder for chaining.
+ */
+ public Builder setComputeBillingOwnerValue(int value) {
+ computeBillingOwner_ = value;
+ bitField0_ |= 0x00000001;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The Compute Billing Owner for this Data Boost App Profile.
+ *
+ *
+ *
+ * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1;
+ *
+ *
+ * @return The computeBillingOwner.
+ */
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
+ getComputeBillingOwner() {
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
+ result =
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ .ComputeBillingOwner.forNumber(computeBillingOwner_);
+ return result == null
+ ? com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
+ .UNRECOGNIZED
+ : result;
+ }
+ /**
+ *
+ *
+ *
+ * The Compute Billing Owner for this Data Boost App Profile.
+ *
+ *
+ *
+ * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1;
+ *
+ *
+ * @param value The computeBillingOwner to set.
+ * @return This builder for chaining.
+ */
+ public Builder setComputeBillingOwner(
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner
+ value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000001;
+ computeBillingOwner_ = value.getNumber();
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * The Compute Billing Owner for this Data Boost App Profile.
+ *
+ *
+ *
+ * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1;
+ *
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearComputeBillingOwner() {
+ bitField0_ = (bitField0_ & ~0x00000001);
+ computeBillingOwner_ = 0;
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly)
+ private static final com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly();
+ }
+
+ public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ @java.lang.Override
+ public DataBoostIsolationReadOnly parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ Builder builder = newBuilder();
+ try {
+ builder.mergeFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(builder.buildPartial());
+ } catch (com.google.protobuf.UninitializedMessageException e) {
+ throw e.asInvalidProtocolBufferException()
+ .setUnfinishedMessage(builder.buildPartial());
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(e)
+ .setUnfinishedMessage(builder.buildPartial());
+ }
+ return builder.buildPartial();
+ }
+ };
+
+ public static com.google.protobuf.Parser parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+ }
+
private int routingPolicyCase_ = 0;
@SuppressWarnings("serial")
@@ -2443,6 +3320,7 @@ public enum IsolationCase
@java.lang.Deprecated
PRIORITY(7),
STANDARD_ISOLATION(11),
+ DATA_BOOST_ISOLATION_READ_ONLY(10),
ISOLATION_NOT_SET(0);
private final int value;
@@ -2465,6 +3343,8 @@ public static IsolationCase forNumber(int value) {
return PRIORITY;
case 11:
return STANDARD_ISOLATION;
+ case 10:
+ return DATA_BOOST_ISOLATION_READ_ONLY;
case 0:
return ISOLATION_NOT_SET;
default:
@@ -2778,7 +3658,7 @@ public com.google.bigtable.admin.v2.AppProfile.SingleClusterRouting getSingleClu
* .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true];
*
* @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See
- * google/bigtable/admin/v2/instance.proto;l=332
+ * google/bigtable/admin/v2/instance.proto;l=361
* @return Whether the priority field is set.
*/
@java.lang.Deprecated
@@ -2798,7 +3678,7 @@ public boolean hasPriority() {
* .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true];
*
* @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See
- * google/bigtable/admin/v2/instance.proto;l=332
+ * google/bigtable/admin/v2/instance.proto;l=361
* @return The enum numeric value on the wire for priority.
*/
@java.lang.Deprecated
@@ -2821,7 +3701,7 @@ public int getPriorityValue() {
* .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true];
*
* @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See
- * google/bigtable/admin/v2/instance.proto;l=332
+ * google/bigtable/admin/v2/instance.proto;l=361
* @return The priority.
*/
@java.lang.Deprecated
@@ -2892,6 +3772,68 @@ public com.google.bigtable.admin.v2.AppProfile.StandardIsolation getStandardIsol
return com.google.bigtable.admin.v2.AppProfile.StandardIsolation.getDefaultInstance();
}
+ public static final int DATA_BOOST_ISOLATION_READ_ONLY_FIELD_NUMBER = 10;
+ /**
+ *
+ *
+ *
+ * Specifies that this app profile is intended for read-only usage via the
+ * Data Boost feature.
+ *
+ *
+ *
+ * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
+ *
+ *
+ * @return Whether the dataBoostIsolationReadOnly field is set.
+ */
+ @java.lang.Override
+ public boolean hasDataBoostIsolationReadOnly() {
+ return isolationCase_ == 10;
+ }
+ /**
+ *
+ *
+ *
+ * Specifies that this app profile is intended for read-only usage via the
+ * Data Boost feature.
+ *
+ *
+ *
+ * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
+ *
+ *
+ * @return The dataBoostIsolationReadOnly.
+ */
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ getDataBoostIsolationReadOnly() {
+ if (isolationCase_ == 10) {
+ return (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) isolation_;
+ }
+ return com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.getDefaultInstance();
+ }
+ /**
+ *
+ *
+ *
+ * Specifies that this app profile is intended for read-only usage via the
+ * Data Boost feature.
+ *
+ *
+ *
+ * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
+ *
+ */
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnlyOrBuilder
+ getDataBoostIsolationReadOnlyOrBuilder() {
+ if (isolationCase_ == 10) {
+ return (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) isolation_;
+ }
+ return com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.getDefaultInstance();
+ }
+
private byte memoizedIsInitialized = -1;
@java.lang.Override
@@ -2926,6 +3868,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
if (isolationCase_ == 7) {
output.writeEnum(7, ((java.lang.Integer) isolation_));
}
+ if (isolationCase_ == 10) {
+ output.writeMessage(
+ 10, (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) isolation_);
+ }
if (isolationCase_ == 11) {
output.writeMessage(
11, (com.google.bigtable.admin.v2.AppProfile.StandardIsolation) isolation_);
@@ -2964,6 +3910,11 @@ public int getSerializedSize() {
com.google.protobuf.CodedOutputStream.computeEnumSize(
7, ((java.lang.Integer) isolation_));
}
+ if (isolationCase_ == 10) {
+ size +=
+ com.google.protobuf.CodedOutputStream.computeMessageSize(
+ 10, (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) isolation_);
+ }
if (isolationCase_ == 11) {
size +=
com.google.protobuf.CodedOutputStream.computeMessageSize(
@@ -3007,6 +3958,10 @@ public boolean equals(final java.lang.Object obj) {
case 11:
if (!getStandardIsolation().equals(other.getStandardIsolation())) return false;
break;
+ case 10:
+ if (!getDataBoostIsolationReadOnly().equals(other.getDataBoostIsolationReadOnly()))
+ return false;
+ break;
case 0:
default:
}
@@ -3048,6 +4003,10 @@ public int hashCode() {
hash = (37 * hash) + STANDARD_ISOLATION_FIELD_NUMBER;
hash = (53 * hash) + getStandardIsolation().hashCode();
break;
+ case 10:
+ hash = (37 * hash) + DATA_BOOST_ISOLATION_READ_ONLY_FIELD_NUMBER;
+ hash = (53 * hash) + getDataBoostIsolationReadOnly().hashCode();
+ break;
case 0:
default:
}
@@ -3203,6 +4162,9 @@ public Builder clear() {
if (standardIsolationBuilder_ != null) {
standardIsolationBuilder_.clear();
}
+ if (dataBoostIsolationReadOnlyBuilder_ != null) {
+ dataBoostIsolationReadOnlyBuilder_.clear();
+ }
routingPolicyCase_ = 0;
routingPolicy_ = null;
isolationCase_ = 0;
@@ -3269,6 +4231,9 @@ private void buildPartialOneofs(com.google.bigtable.admin.v2.AppProfile result)
if (isolationCase_ == 11 && standardIsolationBuilder_ != null) {
result.isolation_ = standardIsolationBuilder_.build();
}
+ if (isolationCase_ == 10 && dataBoostIsolationReadOnlyBuilder_ != null) {
+ result.isolation_ = dataBoostIsolationReadOnlyBuilder_.build();
+ }
}
@java.lang.Override
@@ -3358,6 +4323,11 @@ public Builder mergeFrom(com.google.bigtable.admin.v2.AppProfile other) {
mergeStandardIsolation(other.getStandardIsolation());
break;
}
+ case DATA_BOOST_ISOLATION_READ_ONLY:
+ {
+ mergeDataBoostIsolationReadOnly(other.getDataBoostIsolationReadOnly());
+ break;
+ }
case ISOLATION_NOT_SET:
{
break;
@@ -3428,6 +4398,13 @@ public Builder mergeFrom(
isolation_ = rawValue;
break;
} // case 56
+ case 82:
+ {
+ input.readMessage(
+ getDataBoostIsolationReadOnlyFieldBuilder().getBuilder(), extensionRegistry);
+ isolationCase_ = 10;
+ break;
+ } // case 82
case 90:
{
input.readMessage(
@@ -4318,7 +5295,7 @@ public Builder clearSingleClusterRouting() {
* .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true];
*
* @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See
- * google/bigtable/admin/v2/instance.proto;l=332
+ * google/bigtable/admin/v2/instance.proto;l=361
* @return Whether the priority field is set.
*/
@java.lang.Override
@@ -4339,7 +5316,7 @@ public boolean hasPriority() {
* .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true];
*
* @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See
- * google/bigtable/admin/v2/instance.proto;l=332
+ * google/bigtable/admin/v2/instance.proto;l=361
* @return The enum numeric value on the wire for priority.
*/
@java.lang.Override
@@ -4363,7 +5340,7 @@ public int getPriorityValue() {
* .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true];
*
* @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See
- * google/bigtable/admin/v2/instance.proto;l=332
+ * google/bigtable/admin/v2/instance.proto;l=361
* @param value The enum numeric value on the wire for priority to set.
* @return This builder for chaining.
*/
@@ -4387,7 +5364,7 @@ public Builder setPriorityValue(int value) {
* .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true];
*
* @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See
- * google/bigtable/admin/v2/instance.proto;l=332
+ * google/bigtable/admin/v2/instance.proto;l=361
* @return The priority.
*/
@java.lang.Override
@@ -4416,7 +5393,7 @@ public com.google.bigtable.admin.v2.AppProfile.Priority getPriority() {
* .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true];
*
* @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See
- * google/bigtable/admin/v2/instance.proto;l=332
+ * google/bigtable/admin/v2/instance.proto;l=361
* @param value The priority to set.
* @return This builder for chaining.
*/
@@ -4443,7 +5420,7 @@ public Builder setPriority(com.google.bigtable.admin.v2.AppProfile.Priority valu
* .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true];
*
* @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See
- * google/bigtable/admin/v2/instance.proto;l=332
+ * google/bigtable/admin/v2/instance.proto;l=361
* @return This builder for chaining.
*/
@java.lang.Deprecated
@@ -4680,6 +5657,256 @@ public Builder clearStandardIsolation() {
return standardIsolationBuilder_;
}
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly,
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.Builder,
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnlyOrBuilder>
+ dataBoostIsolationReadOnlyBuilder_;
+ /**
+ *
+ *
+ *
+ * Specifies that this app profile is intended for read-only usage via the
+ * Data Boost feature.
+ *
+ *
+ *
+ * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
+ *
+ *
+ * @return Whether the dataBoostIsolationReadOnly field is set.
+ */
+ @java.lang.Override
+ public boolean hasDataBoostIsolationReadOnly() {
+ return isolationCase_ == 10;
+ }
+ /**
+ *
+ *
+ *
+ * Specifies that this app profile is intended for read-only usage via the
+ * Data Boost feature.
+ *
+ *
+ *
+ * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
+ *
+ *
+ * @return The dataBoostIsolationReadOnly.
+ */
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ getDataBoostIsolationReadOnly() {
+ if (dataBoostIsolationReadOnlyBuilder_ == null) {
+ if (isolationCase_ == 10) {
+ return (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) isolation_;
+ }
+ return com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ .getDefaultInstance();
+ } else {
+ if (isolationCase_ == 10) {
+ return dataBoostIsolationReadOnlyBuilder_.getMessage();
+ }
+ return com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ .getDefaultInstance();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Specifies that this app profile is intended for read-only usage via the
+ * Data Boost feature.
+ *
+ *
+ *
+ * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
+ *
+ */
+ public Builder setDataBoostIsolationReadOnly(
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly value) {
+ if (dataBoostIsolationReadOnlyBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ isolation_ = value;
+ onChanged();
+ } else {
+ dataBoostIsolationReadOnlyBuilder_.setMessage(value);
+ }
+ isolationCase_ = 10;
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Specifies that this app profile is intended for read-only usage via the
+ * Data Boost feature.
+ *
+ *
+ *
+ * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
+ *
+ */
+ public Builder setDataBoostIsolationReadOnly(
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.Builder
+ builderForValue) {
+ if (dataBoostIsolationReadOnlyBuilder_ == null) {
+ isolation_ = builderForValue.build();
+ onChanged();
+ } else {
+ dataBoostIsolationReadOnlyBuilder_.setMessage(builderForValue.build());
+ }
+ isolationCase_ = 10;
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Specifies that this app profile is intended for read-only usage via the
+ * Data Boost feature.
+ *
+ *
+ *
+ * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
+ *
+ */
+ public Builder mergeDataBoostIsolationReadOnly(
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly value) {
+ if (dataBoostIsolationReadOnlyBuilder_ == null) {
+ if (isolationCase_ == 10
+ && isolation_
+ != com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ .getDefaultInstance()) {
+ isolation_ =
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.newBuilder(
+ (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly)
+ isolation_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ isolation_ = value;
+ }
+ onChanged();
+ } else {
+ if (isolationCase_ == 10) {
+ dataBoostIsolationReadOnlyBuilder_.mergeFrom(value);
+ } else {
+ dataBoostIsolationReadOnlyBuilder_.setMessage(value);
+ }
+ }
+ isolationCase_ = 10;
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Specifies that this app profile is intended for read-only usage via the
+ * Data Boost feature.
+ *
+ *
+ *
+ * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
+ *
+ */
+ public Builder clearDataBoostIsolationReadOnly() {
+ if (dataBoostIsolationReadOnlyBuilder_ == null) {
+ if (isolationCase_ == 10) {
+ isolationCase_ = 0;
+ isolation_ = null;
+ onChanged();
+ }
+ } else {
+ if (isolationCase_ == 10) {
+ isolationCase_ = 0;
+ isolation_ = null;
+ }
+ dataBoostIsolationReadOnlyBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Specifies that this app profile is intended for read-only usage via the
+ * Data Boost feature.
+ *
+ *
+ *
+ * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
+ *
+ */
+ public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.Builder
+ getDataBoostIsolationReadOnlyBuilder() {
+ return getDataBoostIsolationReadOnlyFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ *
+ * Specifies that this app profile is intended for read-only usage via the
+ * Data Boost feature.
+ *
+ *
+ *
+ * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
+ *
+ */
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnlyOrBuilder
+ getDataBoostIsolationReadOnlyOrBuilder() {
+ if ((isolationCase_ == 10) && (dataBoostIsolationReadOnlyBuilder_ != null)) {
+ return dataBoostIsolationReadOnlyBuilder_.getMessageOrBuilder();
+ } else {
+ if (isolationCase_ == 10) {
+ return (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) isolation_;
+ }
+ return com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ .getDefaultInstance();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Specifies that this app profile is intended for read-only usage via the
+ * Data Boost feature.
+ *
+ *
+ *
+ * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
+ *
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly,
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.Builder,
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnlyOrBuilder>
+ getDataBoostIsolationReadOnlyFieldBuilder() {
+ if (dataBoostIsolationReadOnlyBuilder_ == null) {
+ if (!(isolationCase_ == 10)) {
+ isolation_ =
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ .getDefaultInstance();
+ }
+ dataBoostIsolationReadOnlyBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly,
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.Builder,
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnlyOrBuilder>(
+ (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) isolation_,
+ getParentForChildren(),
+ isClean());
+ isolation_ = null;
+ }
+ isolationCase_ = 10;
+ onChanged();
+ return dataBoostIsolationReadOnlyBuilder_;
+ }
+
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfileOrBuilder.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfileOrBuilder.java
index 262136da9e..e5fa6a2fa5 100644
--- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfileOrBuilder.java
+++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfileOrBuilder.java
@@ -209,7 +209,7 @@ public interface AppProfileOrBuilder
* .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true];
*
* @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See
- * google/bigtable/admin/v2/instance.proto;l=332
+ * google/bigtable/admin/v2/instance.proto;l=361
* @return Whether the priority field is set.
*/
@java.lang.Deprecated
@@ -227,7 +227,7 @@ public interface AppProfileOrBuilder
* .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true];
*
* @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See
- * google/bigtable/admin/v2/instance.proto;l=332
+ * google/bigtable/admin/v2/instance.proto;l=361
* @return The enum numeric value on the wire for priority.
*/
@java.lang.Deprecated
@@ -245,7 +245,7 @@ public interface AppProfileOrBuilder
* .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true];
*
* @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See
- * google/bigtable/admin/v2/instance.proto;l=332
+ * google/bigtable/admin/v2/instance.proto;l=361
* @return The priority.
*/
@java.lang.Deprecated
@@ -290,6 +290,52 @@ public interface AppProfileOrBuilder
com.google.bigtable.admin.v2.AppProfile.StandardIsolationOrBuilder
getStandardIsolationOrBuilder();
+ /**
+ *
+ *
+ *
+ * Specifies that this app profile is intended for read-only usage via the
+ * Data Boost feature.
+ *
+ *
+ *
+ * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
+ *
+ *
+ * @return Whether the dataBoostIsolationReadOnly field is set.
+ */
+ boolean hasDataBoostIsolationReadOnly();
+ /**
+ *
+ *
+ *
+ * Specifies that this app profile is intended for read-only usage via the
+ * Data Boost feature.
+ *
+ *
+ *
+ * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
+ *
+ *
+ * @return The dataBoostIsolationReadOnly.
+ */
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly
+ getDataBoostIsolationReadOnly();
+ /**
+ *
+ *
+ *
+ * Specifies that this app profile is intended for read-only usage via the
+ * Data Boost feature.
+ *
+ *
+ *
+ * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
+ *
+ */
+ com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnlyOrBuilder
+ getDataBoostIsolationReadOnlyOrBuilder();
+
com.google.bigtable.admin.v2.AppProfile.RoutingPolicyCase getRoutingPolicyCase();
com.google.bigtable.admin.v2.AppProfile.IsolationCase getIsolationCase();
diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BigtableTableAdminProto.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BigtableTableAdminProto.java
index dc4d0a0a0b..a9290a9ae7 100644
--- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BigtableTableAdminProto.java
+++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BigtableTableAdminProto.java
@@ -108,6 +108,14 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r
internal_static_google_bigtable_admin_v2_CheckConsistencyRequest_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_google_bigtable_admin_v2_CheckConsistencyRequest_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_bigtable_admin_v2_StandardReadRemoteWrites_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_bigtable_admin_v2_StandardReadRemoteWrites_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_bigtable_admin_v2_DataBoostReadLocalWrites_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_bigtable_admin_v2_DataBoostReadLocalWrites_fieldAccessorTable;
static final com.google.protobuf.Descriptors.Descriptor
internal_static_google_bigtable_admin_v2_CheckConsistencyResponse_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
@@ -297,277 +305,283 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "nerateConsistencyTokenRequest\0228\n\004name\030\001 "
+ "\001(\tB*\340A\002\372A$\n\"bigtableadmin.googleapis.co"
+ "m/Table\"=\n GenerateConsistencyTokenRespo"
- + "nse\022\031\n\021consistency_token\030\001 \001(\t\"s\n\027CheckC"
- + "onsistencyRequest\0228\n\004name\030\001 \001(\tB*\340A\002\372A$\n"
- + "\"bigtableadmin.googleapis.com/Table\022\036\n\021c"
- + "onsistency_token\030\002 \001(\tB\003\340A\002\".\n\030CheckCons"
- + "istencyResponse\022\022\n\nconsistent\030\001 \001(\010\"\346\001\n\024"
- + "SnapshotTableRequest\0228\n\004name\030\001 \001(\tB*\340A\002\372"
- + "A$\n\"bigtableadmin.googleapis.com/Table\022="
- + "\n\007cluster\030\002 \001(\tB,\340A\002\372A&\n$bigtableadmin.g"
- + "oogleapis.com/Cluster\022\030\n\013snapshot_id\030\003 \001"
- + "(\tB\003\340A\002\022&\n\003ttl\030\004 \001(\0132\031.google.protobuf.D"
- + "uration\022\023\n\013description\030\005 \001(\t\"Q\n\022GetSnaps"
- + "hotRequest\022;\n\004name\030\001 \001(\tB-\340A\002\372A\'\n%bigtab"
- + "leadmin.googleapis.com/Snapshot\"{\n\024ListS"
- + "napshotsRequest\022<\n\006parent\030\001 \001(\tB,\340A\002\372A&\n"
- + "$bigtableadmin.googleapis.com/Cluster\022\021\n"
- + "\tpage_size\030\002 \001(\005\022\022\n\npage_token\030\003 \001(\t\"g\n\025"
- + "ListSnapshotsResponse\0225\n\tsnapshots\030\001 \003(\013"
- + "2\".google.bigtable.admin.v2.Snapshot\022\027\n\017"
- + "next_page_token\030\002 \001(\t\"T\n\025DeleteSnapshotR"
- + "equest\022;\n\004name\030\001 \001(\tB-\340A\002\372A\'\n%bigtablead"
- + "min.googleapis.com/Snapshot\"\304\001\n\025Snapshot"
- + "TableMetadata\022H\n\020original_request\030\001 \001(\0132"
- + "..google.bigtable.admin.v2.SnapshotTable"
- + "Request\0220\n\014request_time\030\002 \001(\0132\032.google.p"
- + "rotobuf.Timestamp\022/\n\013finish_time\030\003 \001(\0132\032"
- + ".google.protobuf.Timestamp\"\330\001\n\037CreateTab"
- + "leFromSnapshotMetadata\022R\n\020original_reque"
- + "st\030\001 \001(\01328.google.bigtable.admin.v2.Crea"
- + "teTableFromSnapshotRequest\0220\n\014request_ti"
- + "me\030\002 \001(\0132\032.google.protobuf.Timestamp\022/\n\013"
- + "finish_time\030\003 \001(\0132\032.google.protobuf.Time"
- + "stamp\"\242\001\n\023CreateBackupRequest\022<\n\006parent\030"
- + "\001 \001(\tB,\340A\002\372A&\n$bigtableadmin.googleapis."
- + "com/Cluster\022\026\n\tbackup_id\030\002 \001(\tB\003\340A\002\0225\n\006b"
- + "ackup\030\003 \001(\0132 .google.bigtable.admin.v2.B"
- + "ackupB\003\340A\002\"\230\001\n\024CreateBackupMetadata\022\014\n\004n"
- + "ame\030\001 \001(\t\022\024\n\014source_table\030\002 \001(\t\022.\n\nstart"
- + "_time\030\003 \001(\0132\032.google.protobuf.Timestamp\022"
- + ",\n\010end_time\030\004 \001(\0132\032.google.protobuf.Time"
- + "stamp\"\202\001\n\023UpdateBackupRequest\0225\n\006backup\030"
- + "\001 \001(\0132 .google.bigtable.admin.v2.BackupB"
- + "\003\340A\002\0224\n\013update_mask\030\002 \001(\0132\032.google.proto"
- + "buf.FieldMaskB\003\340A\002\"M\n\020GetBackupRequest\0229"
- + "\n\004name\030\001 \001(\tB+\340A\002\372A%\n#bigtableadmin.goog"
- + "leapis.com/Backup\"P\n\023DeleteBackupRequest"
- + "\0229\n\004name\030\001 \001(\tB+\340A\002\372A%\n#bigtableadmin.go"
- + "ogleapis.com/Backup\"\233\001\n\022ListBackupsReque"
- + "st\022<\n\006parent\030\001 \001(\tB,\340A\002\372A&\n$bigtableadmi"
- + "n.googleapis.com/Cluster\022\016\n\006filter\030\002 \001(\t"
- + "\022\020\n\010order_by\030\003 \001(\t\022\021\n\tpage_size\030\004 \001(\005\022\022\n"
- + "\npage_token\030\005 \001(\t\"a\n\023ListBackupsResponse"
- + "\0221\n\007backups\030\001 \003(\0132 .google.bigtable.admi"
- + "n.v2.Backup\022\027\n\017next_page_token\030\002 \001(\t\"\343\001\n"
- + "\021CopyBackupRequest\022<\n\006parent\030\001 \001(\tB,\340A\002\372"
- + "A&\n$bigtableadmin.googleapis.com/Cluster"
- + "\022\026\n\tbackup_id\030\002 \001(\tB\003\340A\002\022B\n\rsource_backu"
- + "p\030\003 \001(\tB+\340A\002\372A%\n#bigtableadmin.googleapi"
- + "s.com/Backup\0224\n\013expire_time\030\004 \001(\0132\032.goog"
- + "le.protobuf.TimestampB\003\340A\002\"\315\001\n\022CopyBacku"
- + "pMetadata\0226\n\004name\030\001 \001(\tB(\372A%\n#bigtablead"
- + "min.googleapis.com/Backup\022@\n\022source_back"
- + "up_info\030\002 \001(\0132$.google.bigtable.admin.v2"
- + ".BackupInfo\022=\n\010progress\030\003 \001(\0132+.google.b"
- + "igtable.admin.v2.OperationProgress\"\313\001\n\033C"
- + "reateAuthorizedViewRequest\022C\n\006parent\030\001 \001"
- + "(\tB3\340A\002\372A-\022+bigtableadmin.googleapis.com"
- + "/AuthorizedView\022\037\n\022authorized_view_id\030\002 "
- + "\001(\tB\003\340A\002\022F\n\017authorized_view\030\003 \001(\0132(.goog"
- + "le.bigtable.admin.v2.AuthorizedViewB\003\340A\002"
- + "\"\322\001\n\034CreateAuthorizedViewMetadata\022O\n\020ori"
- + "ginal_request\030\001 \001(\01325.google.bigtable.ad"
- + "min.v2.CreateAuthorizedViewRequest\0220\n\014re"
+ + "nse\022\031\n\021consistency_token\030\001 \001(\t\"\262\002\n\027Check"
+ + "ConsistencyRequest\0228\n\004name\030\001 \001(\tB*\340A\002\372A$"
+ + "\n\"bigtableadmin.googleapis.com/Table\022\036\n\021"
+ + "consistency_token\030\002 \001(\tB\003\340A\002\022Y\n\033standard"
+ + "_read_remote_writes\030\003 \001(\01322.google.bigta"
+ + "ble.admin.v2.StandardReadRemoteWritesH\000\022"
+ + "Z\n\034data_boost_read_local_writes\030\004 \001(\01322."
+ + "google.bigtable.admin.v2.DataBoostReadLo"
+ + "calWritesH\000B\006\n\004mode\"\032\n\030StandardReadRemot"
+ + "eWrites\"\032\n\030DataBoostReadLocalWrites\".\n\030C"
+ + "heckConsistencyResponse\022\022\n\nconsistent\030\001 "
+ + "\001(\010\"\346\001\n\024SnapshotTableRequest\0228\n\004name\030\001 \001"
+ + "(\tB*\340A\002\372A$\n\"bigtableadmin.googleapis.com"
+ + "/Table\022=\n\007cluster\030\002 \001(\tB,\340A\002\372A&\n$bigtabl"
+ + "eadmin.googleapis.com/Cluster\022\030\n\013snapsho"
+ + "t_id\030\003 \001(\tB\003\340A\002\022&\n\003ttl\030\004 \001(\0132\031.google.pr"
+ + "otobuf.Duration\022\023\n\013description\030\005 \001(\t\"Q\n\022"
+ + "GetSnapshotRequest\022;\n\004name\030\001 \001(\tB-\340A\002\372A\'"
+ + "\n%bigtableadmin.googleapis.com/Snapshot\""
+ + "{\n\024ListSnapshotsRequest\022<\n\006parent\030\001 \001(\tB"
+ + ",\340A\002\372A&\n$bigtableadmin.googleapis.com/Cl"
+ + "uster\022\021\n\tpage_size\030\002 \001(\005\022\022\n\npage_token\030\003"
+ + " \001(\t\"g\n\025ListSnapshotsResponse\0225\n\tsnapsho"
+ + "ts\030\001 \003(\0132\".google.bigtable.admin.v2.Snap"
+ + "shot\022\027\n\017next_page_token\030\002 \001(\t\"T\n\025DeleteS"
+ + "napshotRequest\022;\n\004name\030\001 \001(\tB-\340A\002\372A\'\n%bi"
+ + "gtableadmin.googleapis.com/Snapshot\"\304\001\n\025"
+ + "SnapshotTableMetadata\022H\n\020original_reques"
+ + "t\030\001 \001(\0132..google.bigtable.admin.v2.Snaps"
+ + "hotTableRequest\0220\n\014request_time\030\002 \001(\0132\032."
+ + "google.protobuf.Timestamp\022/\n\013finish_time"
+ + "\030\003 \001(\0132\032.google.protobuf.Timestamp\"\330\001\n\037C"
+ + "reateTableFromSnapshotMetadata\022R\n\020origin"
+ + "al_request\030\001 \001(\01328.google.bigtable.admin"
+ + ".v2.CreateTableFromSnapshotRequest\0220\n\014re"
+ "quest_time\030\002 \001(\0132\032.google.protobuf.Times"
+ "tamp\022/\n\013finish_time\030\003 \001(\0132\032.google.proto"
- + "buf.Timestamp\"\334\001\n\032ListAuthorizedViewsReq"
- + "uest\022C\n\006parent\030\001 \001(\tB3\340A\002\372A-\022+bigtablead"
- + "min.googleapis.com/AuthorizedView\022\026\n\tpag"
- + "e_size\030\002 \001(\005B\003\340A\001\022\027\n\npage_token\030\003 \001(\tB\003\340"
- + "A\001\022H\n\004view\030\004 \001(\01625.google.bigtable.admin"
- + ".v2.AuthorizedView.ResponseViewB\003\340A\001\"z\n\033"
- + "ListAuthorizedViewsResponse\022B\n\020authorize"
- + "d_views\030\001 \003(\0132(.google.bigtable.admin.v2"
- + ".AuthorizedView\022\027\n\017next_page_token\030\002 \001(\t"
- + "\"\247\001\n\030GetAuthorizedViewRequest\022A\n\004name\030\001 "
- + "\001(\tB3\340A\002\372A-\n+bigtableadmin.googleapis.co"
- + "m/AuthorizedView\022H\n\004view\030\002 \001(\01625.google."
- + "bigtable.admin.v2.AuthorizedView.Respons"
- + "eViewB\003\340A\001\"\271\001\n\033UpdateAuthorizedViewReque"
- + "st\022F\n\017authorized_view\030\001 \001(\0132(.google.big"
- + "table.admin.v2.AuthorizedViewB\003\340A\002\0224\n\013up"
- + "date_mask\030\002 \001(\0132\032.google.protobuf.FieldM"
- + "askB\003\340A\001\022\034\n\017ignore_warnings\030\003 \001(\010B\003\340A\001\"\322"
- + "\001\n\034UpdateAuthorizedViewMetadata\022O\n\020origi"
- + "nal_request\030\001 \001(\01325.google.bigtable.admi"
- + "n.v2.UpdateAuthorizedViewRequest\0220\n\014requ"
- + "est_time\030\002 \001(\0132\032.google.protobuf.Timesta"
- + "mp\022/\n\013finish_time\030\003 \001(\0132\032.google.protobu"
- + "f.Timestamp\"s\n\033DeleteAuthorizedViewReque"
- + "st\022A\n\004name\030\001 \001(\tB3\340A\002\372A-\n+bigtableadmin."
- + "googleapis.com/AuthorizedView\022\021\n\004etag\030\002 "
- + "\001(\tB\003\340A\0012\2663\n\022BigtableTableAdmin\022\253\001\n\013Crea"
- + "teTable\022,.google.bigtable.admin.v2.Creat"
- + "eTableRequest\032\037.google.bigtable.admin.v2"
- + ".Table\"M\332A\025parent,table_id,table\202\323\344\223\002/\"*"
- + "/v2/{parent=projects/*/instances/*}/tabl"
- + "es:\001*\022\212\002\n\027CreateTableFromSnapshot\0228.goog"
- + "le.bigtable.admin.v2.CreateTableFromSnap"
- + "shotRequest\032\035.google.longrunning.Operati"
- + "on\"\225\001\312A(\n\005Table\022\037CreateTableFromSnapshot"
- + "Metadata\332A\037parent,table_id,source_snapsh"
- + "ot\202\323\344\223\002B\"=/v2/{parent=projects/*/instanc"
- + "es/*}/tables:createFromSnapshot:\001*\022\244\001\n\nL"
- + "istTables\022+.google.bigtable.admin.v2.Lis"
- + "tTablesRequest\032,.google.bigtable.admin.v"
- + "2.ListTablesResponse\";\332A\006parent\202\323\344\223\002,\022*/"
- + "v2/{parent=projects/*/instances/*}/table"
- + "s\022\221\001\n\010GetTable\022).google.bigtable.admin.v"
- + "2.GetTableRequest\032\037.google.bigtable.admi"
- + "n.v2.Table\"9\332A\004name\202\323\344\223\002,\022*/v2/{name=pro"
- + "jects/*/instances/*/tables/*}\022\316\001\n\013Update"
- + "Table\022,.google.bigtable.admin.v2.UpdateT"
- + "ableRequest\032\035.google.longrunning.Operati"
- + "on\"r\312A\034\n\005Table\022\023UpdateTableMetadata\332A\021ta"
- + "ble,update_mask\202\323\344\223\002920/v2/{table.name=p"
- + "rojects/*/instances/*/tables/*}:\005table\022\216"
- + "\001\n\013DeleteTable\022,.google.bigtable.admin.v"
- + "2.DeleteTableRequest\032\026.google.protobuf.E"
- + "mpty\"9\332A\004name\202\323\344\223\002,**/v2/{name=projects/"
- + "*/instances/*/tables/*}\022\306\001\n\rUndeleteTabl"
- + "e\022..google.bigtable.admin.v2.UndeleteTab"
- + "leRequest\032\035.google.longrunning.Operation"
- + "\"f\312A\036\n\005Table\022\025UndeleteTableMetadata\332A\004na"
- + "me\202\323\344\223\0028\"3/v2/{name=projects/*/instances"
- + "/*/tables/*}:undelete:\001*\022\241\002\n\024CreateAutho"
- + "rizedView\0225.google.bigtable.admin.v2.Cre"
- + "ateAuthorizedViewRequest\032\035.google.longru"
- + "nning.Operation\"\262\001\312A.\n\016AuthorizedView\022\034C"
- + "reateAuthorizedViewMetadata\332A)parent,aut"
- + "horized_view,authorized_view_id\202\323\344\223\002O\""
- + "v2/{parent=projects/*/instances/*/tables"
- + "/*}/authorizedViews:\017authorized_view\022\321\001\n"
- + "\023ListAuthorizedViews\0224.google.bigtable.a"
- + "dmin.v2.ListAuthorizedViewsRequest\0325.goo"
- + "gle.bigtable.admin.v2.ListAuthorizedView"
- + "sResponse\"M\332A\006parent\202\323\344\223\002>\022\022*\022\022*
+ * Checks that reads using an app profile with `StandardIsolation` can
+ * see all writes committed before the token was created, even if the
+ * read and write target different clusters.
+ *
+ *
+ * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3;
+ *
+ *
+ * @return Whether the standardReadRemoteWrites field is set.
+ */
+ @java.lang.Override
+ public boolean hasStandardReadRemoteWrites() {
+ return modeCase_ == 3;
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `StandardIsolation` can
+ * see all writes committed before the token was created, even if the
+ * read and write target different clusters.
+ *
+ *
+ * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3;
+ *
+ *
+ * @return The standardReadRemoteWrites.
+ */
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.StandardReadRemoteWrites getStandardReadRemoteWrites() {
+ if (modeCase_ == 3) {
+ return (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_;
+ }
+ return com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance();
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `StandardIsolation` can
+ * see all writes committed before the token was created, even if the
+ * read and write target different clusters.
+ *
+ *
+ * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3;
+ *
+ */
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.StandardReadRemoteWritesOrBuilder
+ getStandardReadRemoteWritesOrBuilder() {
+ if (modeCase_ == 3) {
+ return (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_;
+ }
+ return com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance();
+ }
+
+ public static final int DATA_BOOST_READ_LOCAL_WRITES_FIELD_NUMBER = 4;
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ * can see all writes committed before the token was created, but only if
+ * the read and write target the same cluster.
+ *
+ *
+ * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ *
+ *
+ * @return Whether the dataBoostReadLocalWrites field is set.
+ */
+ @java.lang.Override
+ public boolean hasDataBoostReadLocalWrites() {
+ return modeCase_ == 4;
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ * can see all writes committed before the token was created, but only if
+ * the read and write target the same cluster.
+ *
+ *
+ * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ *
+ *
+ * @return The dataBoostReadLocalWrites.
+ */
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.DataBoostReadLocalWrites getDataBoostReadLocalWrites() {
+ if (modeCase_ == 4) {
+ return (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_;
+ }
+ return com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance();
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ * can see all writes committed before the token was created, but only if
+ * the read and write target the same cluster.
+ *
+ *
+ * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ *
+ */
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.DataBoostReadLocalWritesOrBuilder
+ getDataBoostReadLocalWritesOrBuilder() {
+ if (modeCase_ == 4) {
+ return (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_;
+ }
+ return com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance();
+ }
+
private byte memoizedIsInitialized = -1;
@java.lang.Override
@@ -195,6 +366,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(consistencyToken_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 2, consistencyToken_);
}
+ if (modeCase_ == 3) {
+ output.writeMessage(3, (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_);
+ }
+ if (modeCase_ == 4) {
+ output.writeMessage(4, (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_);
+ }
getUnknownFields().writeTo(output);
}
@@ -210,6 +387,16 @@ public int getSerializedSize() {
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(consistencyToken_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, consistencyToken_);
}
+ if (modeCase_ == 3) {
+ size +=
+ com.google.protobuf.CodedOutputStream.computeMessageSize(
+ 3, (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_);
+ }
+ if (modeCase_ == 4) {
+ size +=
+ com.google.protobuf.CodedOutputStream.computeMessageSize(
+ 4, (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
@@ -228,6 +415,19 @@ public boolean equals(final java.lang.Object obj) {
if (!getName().equals(other.getName())) return false;
if (!getConsistencyToken().equals(other.getConsistencyToken())) return false;
+ if (!getModeCase().equals(other.getModeCase())) return false;
+ switch (modeCase_) {
+ case 3:
+ if (!getStandardReadRemoteWrites().equals(other.getStandardReadRemoteWrites()))
+ return false;
+ break;
+ case 4:
+ if (!getDataBoostReadLocalWrites().equals(other.getDataBoostReadLocalWrites()))
+ return false;
+ break;
+ case 0:
+ default:
+ }
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@@ -243,6 +443,18 @@ public int hashCode() {
hash = (53 * hash) + getName().hashCode();
hash = (37 * hash) + CONSISTENCY_TOKEN_FIELD_NUMBER;
hash = (53 * hash) + getConsistencyToken().hashCode();
+ switch (modeCase_) {
+ case 3:
+ hash = (37 * hash) + STANDARD_READ_REMOTE_WRITES_FIELD_NUMBER;
+ hash = (53 * hash) + getStandardReadRemoteWrites().hashCode();
+ break;
+ case 4:
+ hash = (37 * hash) + DATA_BOOST_READ_LOCAL_WRITES_FIELD_NUMBER;
+ hash = (53 * hash) + getDataBoostReadLocalWrites().hashCode();
+ break;
+ case 0:
+ default:
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -385,6 +597,14 @@ public Builder clear() {
bitField0_ = 0;
name_ = "";
consistencyToken_ = "";
+ if (standardReadRemoteWritesBuilder_ != null) {
+ standardReadRemoteWritesBuilder_.clear();
+ }
+ if (dataBoostReadLocalWritesBuilder_ != null) {
+ dataBoostReadLocalWritesBuilder_.clear();
+ }
+ modeCase_ = 0;
+ mode_ = null;
return this;
}
@@ -415,6 +635,7 @@ public com.google.bigtable.admin.v2.CheckConsistencyRequest buildPartial() {
if (bitField0_ != 0) {
buildPartial0(result);
}
+ buildPartialOneofs(result);
onBuilt();
return result;
}
@@ -429,6 +650,17 @@ private void buildPartial0(com.google.bigtable.admin.v2.CheckConsistencyRequest
}
}
+ private void buildPartialOneofs(com.google.bigtable.admin.v2.CheckConsistencyRequest result) {
+ result.modeCase_ = modeCase_;
+ result.mode_ = this.mode_;
+ if (modeCase_ == 3 && standardReadRemoteWritesBuilder_ != null) {
+ result.mode_ = standardReadRemoteWritesBuilder_.build();
+ }
+ if (modeCase_ == 4 && dataBoostReadLocalWritesBuilder_ != null) {
+ result.mode_ = dataBoostReadLocalWritesBuilder_.build();
+ }
+ }
+
@java.lang.Override
public Builder clone() {
return super.clone();
@@ -485,6 +717,22 @@ public Builder mergeFrom(com.google.bigtable.admin.v2.CheckConsistencyRequest ot
bitField0_ |= 0x00000002;
onChanged();
}
+ switch (other.getModeCase()) {
+ case STANDARD_READ_REMOTE_WRITES:
+ {
+ mergeStandardReadRemoteWrites(other.getStandardReadRemoteWrites());
+ break;
+ }
+ case DATA_BOOST_READ_LOCAL_WRITES:
+ {
+ mergeDataBoostReadLocalWrites(other.getDataBoostReadLocalWrites());
+ break;
+ }
+ case MODE_NOT_SET:
+ {
+ break;
+ }
+ }
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
@@ -523,6 +771,20 @@ public Builder mergeFrom(
bitField0_ |= 0x00000002;
break;
} // case 18
+ case 26:
+ {
+ input.readMessage(
+ getStandardReadRemoteWritesFieldBuilder().getBuilder(), extensionRegistry);
+ modeCase_ = 3;
+ break;
+ } // case 26
+ case 34:
+ {
+ input.readMessage(
+ getDataBoostReadLocalWritesFieldBuilder().getBuilder(), extensionRegistry);
+ modeCase_ = 4;
+ break;
+ } // case 34
default:
{
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
@@ -540,6 +802,20 @@ public Builder mergeFrom(
return this;
}
+ private int modeCase_ = 0;
+ private java.lang.Object mode_;
+
+ public ModeCase getModeCase() {
+ return ModeCase.forNumber(modeCase_);
+ }
+
+ public Builder clearMode() {
+ modeCase_ = 0;
+ mode_ = null;
+ onChanged();
+ return this;
+ }
+
private int bitField0_;
private java.lang.Object name_ = "";
@@ -774,6 +1050,488 @@ public Builder setConsistencyTokenBytes(com.google.protobuf.ByteString value) {
return this;
}
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites,
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites.Builder,
+ com.google.bigtable.admin.v2.StandardReadRemoteWritesOrBuilder>
+ standardReadRemoteWritesBuilder_;
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `StandardIsolation` can
+ * see all writes committed before the token was created, even if the
+ * read and write target different clusters.
+ *
+ *
+ * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3;
+ *
+ *
+ * @return Whether the standardReadRemoteWrites field is set.
+ */
+ @java.lang.Override
+ public boolean hasStandardReadRemoteWrites() {
+ return modeCase_ == 3;
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `StandardIsolation` can
+ * see all writes committed before the token was created, even if the
+ * read and write target different clusters.
+ *
+ *
+ * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3;
+ *
+ *
+ * @return The standardReadRemoteWrites.
+ */
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.StandardReadRemoteWrites getStandardReadRemoteWrites() {
+ if (standardReadRemoteWritesBuilder_ == null) {
+ if (modeCase_ == 3) {
+ return (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_;
+ }
+ return com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance();
+ } else {
+ if (modeCase_ == 3) {
+ return standardReadRemoteWritesBuilder_.getMessage();
+ }
+ return com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `StandardIsolation` can
+ * see all writes committed before the token was created, even if the
+ * read and write target different clusters.
+ *
+ *
+ * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3;
+ *
+ */
+ public Builder setStandardReadRemoteWrites(
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites value) {
+ if (standardReadRemoteWritesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ mode_ = value;
+ onChanged();
+ } else {
+ standardReadRemoteWritesBuilder_.setMessage(value);
+ }
+ modeCase_ = 3;
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `StandardIsolation` can
+ * see all writes committed before the token was created, even if the
+ * read and write target different clusters.
+ *
+ *
+ * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3;
+ *
+ */
+ public Builder setStandardReadRemoteWrites(
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites.Builder builderForValue) {
+ if (standardReadRemoteWritesBuilder_ == null) {
+ mode_ = builderForValue.build();
+ onChanged();
+ } else {
+ standardReadRemoteWritesBuilder_.setMessage(builderForValue.build());
+ }
+ modeCase_ = 3;
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `StandardIsolation` can
+ * see all writes committed before the token was created, even if the
+ * read and write target different clusters.
+ *
+ *
+ * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3;
+ *
+ */
+ public Builder mergeStandardReadRemoteWrites(
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites value) {
+ if (standardReadRemoteWritesBuilder_ == null) {
+ if (modeCase_ == 3
+ && mode_
+ != com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance()) {
+ mode_ =
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites.newBuilder(
+ (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ mode_ = value;
+ }
+ onChanged();
+ } else {
+ if (modeCase_ == 3) {
+ standardReadRemoteWritesBuilder_.mergeFrom(value);
+ } else {
+ standardReadRemoteWritesBuilder_.setMessage(value);
+ }
+ }
+ modeCase_ = 3;
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `StandardIsolation` can
+ * see all writes committed before the token was created, even if the
+ * read and write target different clusters.
+ *
+ *
+ * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3;
+ *
+ */
+ public Builder clearStandardReadRemoteWrites() {
+ if (standardReadRemoteWritesBuilder_ == null) {
+ if (modeCase_ == 3) {
+ modeCase_ = 0;
+ mode_ = null;
+ onChanged();
+ }
+ } else {
+ if (modeCase_ == 3) {
+ modeCase_ = 0;
+ mode_ = null;
+ }
+ standardReadRemoteWritesBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `StandardIsolation` can
+ * see all writes committed before the token was created, even if the
+ * read and write target different clusters.
+ *
+ *
+ * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3;
+ *
+ */
+ public com.google.bigtable.admin.v2.StandardReadRemoteWrites.Builder
+ getStandardReadRemoteWritesBuilder() {
+ return getStandardReadRemoteWritesFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `StandardIsolation` can
+ * see all writes committed before the token was created, even if the
+ * read and write target different clusters.
+ *
+ *
+ * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3;
+ *
+ */
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.StandardReadRemoteWritesOrBuilder
+ getStandardReadRemoteWritesOrBuilder() {
+ if ((modeCase_ == 3) && (standardReadRemoteWritesBuilder_ != null)) {
+ return standardReadRemoteWritesBuilder_.getMessageOrBuilder();
+ } else {
+ if (modeCase_ == 3) {
+ return (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_;
+ }
+ return com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `StandardIsolation` can
+ * see all writes committed before the token was created, even if the
+ * read and write target different clusters.
+ *
+ *
+ * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3;
+ *
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites,
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites.Builder,
+ com.google.bigtable.admin.v2.StandardReadRemoteWritesOrBuilder>
+ getStandardReadRemoteWritesFieldBuilder() {
+ if (standardReadRemoteWritesBuilder_ == null) {
+ if (!(modeCase_ == 3)) {
+ mode_ = com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance();
+ }
+ standardReadRemoteWritesBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites,
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites.Builder,
+ com.google.bigtable.admin.v2.StandardReadRemoteWritesOrBuilder>(
+ (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_,
+ getParentForChildren(),
+ isClean());
+ mode_ = null;
+ }
+ modeCase_ = 3;
+ onChanged();
+ return standardReadRemoteWritesBuilder_;
+ }
+
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites,
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites.Builder,
+ com.google.bigtable.admin.v2.DataBoostReadLocalWritesOrBuilder>
+ dataBoostReadLocalWritesBuilder_;
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ * can see all writes committed before the token was created, but only if
+ * the read and write target the same cluster.
+ *
+ *
+ * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ *
+ *
+ * @return Whether the dataBoostReadLocalWrites field is set.
+ */
+ @java.lang.Override
+ public boolean hasDataBoostReadLocalWrites() {
+ return modeCase_ == 4;
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ * can see all writes committed before the token was created, but only if
+ * the read and write target the same cluster.
+ *
+ *
+ * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ *
+ *
+ * @return The dataBoostReadLocalWrites.
+ */
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.DataBoostReadLocalWrites getDataBoostReadLocalWrites() {
+ if (dataBoostReadLocalWritesBuilder_ == null) {
+ if (modeCase_ == 4) {
+ return (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_;
+ }
+ return com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance();
+ } else {
+ if (modeCase_ == 4) {
+ return dataBoostReadLocalWritesBuilder_.getMessage();
+ }
+ return com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ * can see all writes committed before the token was created, but only if
+ * the read and write target the same cluster.
+ *
+ *
+ * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ *
+ */
+ public Builder setDataBoostReadLocalWrites(
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites value) {
+ if (dataBoostReadLocalWritesBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ mode_ = value;
+ onChanged();
+ } else {
+ dataBoostReadLocalWritesBuilder_.setMessage(value);
+ }
+ modeCase_ = 4;
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ * can see all writes committed before the token was created, but only if
+ * the read and write target the same cluster.
+ *
+ *
+ * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ *
+ */
+ public Builder setDataBoostReadLocalWrites(
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites.Builder builderForValue) {
+ if (dataBoostReadLocalWritesBuilder_ == null) {
+ mode_ = builderForValue.build();
+ onChanged();
+ } else {
+ dataBoostReadLocalWritesBuilder_.setMessage(builderForValue.build());
+ }
+ modeCase_ = 4;
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ * can see all writes committed before the token was created, but only if
+ * the read and write target the same cluster.
+ *
+ *
+ * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ *
+ */
+ public Builder mergeDataBoostReadLocalWrites(
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites value) {
+ if (dataBoostReadLocalWritesBuilder_ == null) {
+ if (modeCase_ == 4
+ && mode_
+ != com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance()) {
+ mode_ =
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites.newBuilder(
+ (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_)
+ .mergeFrom(value)
+ .buildPartial();
+ } else {
+ mode_ = value;
+ }
+ onChanged();
+ } else {
+ if (modeCase_ == 4) {
+ dataBoostReadLocalWritesBuilder_.mergeFrom(value);
+ } else {
+ dataBoostReadLocalWritesBuilder_.setMessage(value);
+ }
+ }
+ modeCase_ = 4;
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ * can see all writes committed before the token was created, but only if
+ * the read and write target the same cluster.
+ *
+ *
+ * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ *
+ */
+ public Builder clearDataBoostReadLocalWrites() {
+ if (dataBoostReadLocalWritesBuilder_ == null) {
+ if (modeCase_ == 4) {
+ modeCase_ = 0;
+ mode_ = null;
+ onChanged();
+ }
+ } else {
+ if (modeCase_ == 4) {
+ modeCase_ = 0;
+ mode_ = null;
+ }
+ dataBoostReadLocalWritesBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ * can see all writes committed before the token was created, but only if
+ * the read and write target the same cluster.
+ *
+ *
+ * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ *
+ */
+ public com.google.bigtable.admin.v2.DataBoostReadLocalWrites.Builder
+ getDataBoostReadLocalWritesBuilder() {
+ return getDataBoostReadLocalWritesFieldBuilder().getBuilder();
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ * can see all writes committed before the token was created, but only if
+ * the read and write target the same cluster.
+ *
+ *
+ * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ *
+ */
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.DataBoostReadLocalWritesOrBuilder
+ getDataBoostReadLocalWritesOrBuilder() {
+ if ((modeCase_ == 4) && (dataBoostReadLocalWritesBuilder_ != null)) {
+ return dataBoostReadLocalWritesBuilder_.getMessageOrBuilder();
+ } else {
+ if (modeCase_ == 4) {
+ return (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_;
+ }
+ return com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ * can see all writes committed before the token was created, but only if
+ * the read and write target the same cluster.
+ *
+ *
+ * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ *
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites,
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites.Builder,
+ com.google.bigtable.admin.v2.DataBoostReadLocalWritesOrBuilder>
+ getDataBoostReadLocalWritesFieldBuilder() {
+ if (dataBoostReadLocalWritesBuilder_ == null) {
+ if (!(modeCase_ == 4)) {
+ mode_ = com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance();
+ }
+ dataBoostReadLocalWritesBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites,
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites.Builder,
+ com.google.bigtable.admin.v2.DataBoostReadLocalWritesOrBuilder>(
+ (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_,
+ getParentForChildren(),
+ isClean());
+ mode_ = null;
+ }
+ modeCase_ = 4;
+ onChanged();
+ return dataBoostReadLocalWritesBuilder_;
+ }
+
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CheckConsistencyRequestOrBuilder.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CheckConsistencyRequestOrBuilder.java
index 7f47bb6720..259e282606 100644
--- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CheckConsistencyRequestOrBuilder.java
+++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CheckConsistencyRequestOrBuilder.java
@@ -81,4 +81,96 @@ public interface CheckConsistencyRequestOrBuilder
* @return The bytes for consistencyToken.
*/
com.google.protobuf.ByteString getConsistencyTokenBytes();
+
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `StandardIsolation` can
+ * see all writes committed before the token was created, even if the
+ * read and write target different clusters.
+ *
+ *
+ * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3;
+ *
+ *
+ * @return Whether the standardReadRemoteWrites field is set.
+ */
+ boolean hasStandardReadRemoteWrites();
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `StandardIsolation` can
+ * see all writes committed before the token was created, even if the
+ * read and write target different clusters.
+ *
+ *
+ * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3;
+ *
+ *
+ * @return The standardReadRemoteWrites.
+ */
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites getStandardReadRemoteWrites();
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `StandardIsolation` can
+ * see all writes committed before the token was created, even if the
+ * read and write target different clusters.
+ *
+ *
+ * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3;
+ *
+ */
+ com.google.bigtable.admin.v2.StandardReadRemoteWritesOrBuilder
+ getStandardReadRemoteWritesOrBuilder();
+
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ * can see all writes committed before the token was created, but only if
+ * the read and write target the same cluster.
+ *
+ *
+ * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ *
+ *
+ * @return Whether the dataBoostReadLocalWrites field is set.
+ */
+ boolean hasDataBoostReadLocalWrites();
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ * can see all writes committed before the token was created, but only if
+ * the read and write target the same cluster.
+ *
+ *
+ * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ *
+ *
+ * @return The dataBoostReadLocalWrites.
+ */
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites getDataBoostReadLocalWrites();
+ /**
+ *
+ *
+ *
+ * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ * can see all writes committed before the token was created, but only if
+ * the read and write target the same cluster.
+ *
+ *
+ * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ *
+ */
+ com.google.bigtable.admin.v2.DataBoostReadLocalWritesOrBuilder
+ getDataBoostReadLocalWritesOrBuilder();
+
+ com.google.bigtable.admin.v2.CheckConsistencyRequest.ModeCase getModeCase();
}
diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/DataBoostReadLocalWrites.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/DataBoostReadLocalWrites.java
new file mode 100644
index 0000000000..88f7fa88ae
--- /dev/null
+++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/DataBoostReadLocalWrites.java
@@ -0,0 +1,435 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/bigtable/admin/v2/bigtable_table_admin.proto
+
+// Protobuf Java Version: 3.25.2
+package com.google.bigtable.admin.v2;
+
+/**
+ *
+ *
+ *
+ * Checks that all writes before the consistency token was generated in the same
+ * cluster are readable by Databoost.
+ *
+ *
+ * Protobuf type {@code google.bigtable.admin.v2.DataBoostReadLocalWrites}
+ */
+public final class DataBoostReadLocalWrites extends com.google.protobuf.GeneratedMessageV3
+ implements
+ // @@protoc_insertion_point(message_implements:google.bigtable.admin.v2.DataBoostReadLocalWrites)
+ DataBoostReadLocalWritesOrBuilder {
+ private static final long serialVersionUID = 0L;
+ // Use DataBoostReadLocalWrites.newBuilder() to construct.
+ private DataBoostReadLocalWrites(com.google.protobuf.GeneratedMessageV3.Builder> builder) {
+ super(builder);
+ }
+
+ private DataBoostReadLocalWrites() {}
+
+ @java.lang.Override
+ @SuppressWarnings({"unused"})
+ protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+ return new DataBoostReadLocalWrites();
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.bigtable.admin.v2.BigtableTableAdminProto
+ .internal_static_google_bigtable_admin_v2_DataBoostReadLocalWrites_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.bigtable.admin.v2.BigtableTableAdminProto
+ .internal_static_google_bigtable_admin_v2_DataBoostReadLocalWrites_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites.class,
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites.Builder.class);
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ getUnknownFields().writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.bigtable.admin.v2.DataBoostReadLocalWrites)) {
+ return super.equals(obj);
+ }
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites other =
+ (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) obj;
+
+ if (!getUnknownFields().equals(other.getUnknownFields())) return false;
+ return true;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ *
+ * Checks that all writes before the consistency token was generated in the same
+ * cluster are readable by Databoost.
+ *
+ *
+ * Protobuf type {@code google.bigtable.admin.v2.DataBoostReadLocalWrites}
+ */
+ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ implements
+ // @@protoc_insertion_point(builder_implements:google.bigtable.admin.v2.DataBoostReadLocalWrites)
+ com.google.bigtable.admin.v2.DataBoostReadLocalWritesOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.bigtable.admin.v2.BigtableTableAdminProto
+ .internal_static_google_bigtable_admin_v2_DataBoostReadLocalWrites_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.bigtable.admin.v2.BigtableTableAdminProto
+ .internal_static_google_bigtable_admin_v2_DataBoostReadLocalWrites_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites.class,
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites.Builder.class);
+ }
+
+ // Construct using com.google.bigtable.admin.v2.DataBoostReadLocalWrites.newBuilder()
+ private Builder() {}
+
+ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ }
+
+ @java.lang.Override
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+ return com.google.bigtable.admin.v2.BigtableTableAdminProto
+ .internal_static_google_bigtable_admin_v2_DataBoostReadLocalWrites_descriptor;
+ }
+
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.DataBoostReadLocalWrites getDefaultInstanceForType() {
+ return com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance();
+ }
+
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.DataBoostReadLocalWrites build() {
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.DataBoostReadLocalWrites buildPartial() {
+ com.google.bigtable.admin.v2.DataBoostReadLocalWrites result =
+ new com.google.bigtable.admin.v2.DataBoostReadLocalWrites(this);
+ onBuilt();
+ return result;
+ }
+
+ @java.lang.Override
+ public Builder clone() {
+ return super.clone();
+ }
+
+ @java.lang.Override
+ public Builder setField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.setField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return super.clearField(field);
+ }
+
+ @java.lang.Override
+ public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return super.clearOneof(oneof);
+ }
+
+ @java.lang.Override
+ public Builder setRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
+ return super.setRepeatedField(field, index, value);
+ }
+
+ @java.lang.Override
+ public Builder addRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.addRepeatedField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof com.google.bigtable.admin.v2.DataBoostReadLocalWrites) {
+ return mergeFrom((com.google.bigtable.admin.v2.DataBoostReadLocalWrites) other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(com.google.bigtable.admin.v2.DataBoostReadLocalWrites other) {
+ if (other == com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance())
+ return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default:
+ {
+ if (!super.parseUnknownField(input, extensionRegistry, tag)) {
+ done = true; // was an endgroup tag
+ }
+ break;
+ } // default:
+ } // switch (tag)
+ } // while (!done)
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.unwrapIOException();
+ } finally {
+ onChanged();
+ } // finally
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.bigtable.admin.v2.DataBoostReadLocalWrites)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DataBoostReadLocalWrites)
+ private static final com.google.bigtable.admin.v2.DataBoostReadLocalWrites DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.bigtable.admin.v2.DataBoostReadLocalWrites();
+ }
+
+ public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ @java.lang.Override
+ public DataBoostReadLocalWrites parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ Builder builder = newBuilder();
+ try {
+ builder.mergeFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(builder.buildPartial());
+ } catch (com.google.protobuf.UninitializedMessageException e) {
+ throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(e)
+ .setUnfinishedMessage(builder.buildPartial());
+ }
+ return builder.buildPartial();
+ }
+ };
+
+ public static com.google.protobuf.Parser parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.DataBoostReadLocalWrites getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+}
diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/DataBoostReadLocalWritesOrBuilder.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/DataBoostReadLocalWritesOrBuilder.java
new file mode 100644
index 0000000000..be217512d3
--- /dev/null
+++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/DataBoostReadLocalWritesOrBuilder.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/bigtable/admin/v2/bigtable_table_admin.proto
+
+// Protobuf Java Version: 3.25.2
+package com.google.bigtable.admin.v2;
+
+public interface DataBoostReadLocalWritesOrBuilder
+ extends
+ // @@protoc_insertion_point(interface_extends:google.bigtable.admin.v2.DataBoostReadLocalWrites)
+ com.google.protobuf.MessageOrBuilder {}
diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/InstanceProto.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/InstanceProto.java
index c6e08673fa..29a37bf8f6 100644
--- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/InstanceProto.java
+++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/InstanceProto.java
@@ -76,6 +76,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r
internal_static_google_bigtable_admin_v2_AppProfile_StandardIsolation_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
internal_static_google_bigtable_admin_v2_AppProfile_StandardIsolation_fieldAccessorTable;
+ static final com.google.protobuf.Descriptors.Descriptor
+ internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_descriptor;
+ static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_fieldAccessorTable;
static final com.google.protobuf.Descriptors.Descriptor
internal_static_google_bigtable_admin_v2_HotTablet_descriptor;
static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
@@ -137,7 +141,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "\002\022\014\n\010RESIZING\020\003\022\014\n\010DISABLED\020\004:e\352Ab\n$bigt"
+ "ableadmin.googleapis.com/Cluster\022:projec"
+ "ts/{project}/instances/{instance}/cluste"
- + "rs/{cluster}B\010\n\006config\"\350\006\n\nAppProfile\022\014\n"
+ + "rs/{cluster}B\010\n\006config\"\322\t\n\nAppProfile\022\014\n"
+ "\004name\030\001 \001(\t\022\014\n\004etag\030\002 \001(\t\022\023\n\013description"
+ "\030\003 \001(\t\022g\n\035multi_cluster_routing_use_any\030"
+ "\005 \001(\0132>.google.bigtable.admin.v2.AppProf"
@@ -148,36 +152,46 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ ".admin.v2.AppProfile.PriorityB\002\030\001H\001\022T\n\022s"
+ "tandard_isolation\030\013 \001(\01326.google.bigtabl"
+ "e.admin.v2.AppProfile.StandardIsolationH"
- + "\001\0320\n\031MultiClusterRoutingUseAny\022\023\n\013cluste"
- + "r_ids\030\001 \003(\t\032N\n\024SingleClusterRouting\022\022\n\nc"
- + "luster_id\030\001 \001(\t\022\"\n\032allow_transactional_w"
- + "rites\030\002 \001(\010\032T\n\021StandardIsolation\022?\n\010prio"
- + "rity\030\001 \001(\0162-.google.bigtable.admin.v2.Ap"
- + "pProfile.Priority\"^\n\010Priority\022\030\n\024PRIORIT"
- + "Y_UNSPECIFIED\020\000\022\020\n\014PRIORITY_LOW\020\001\022\023\n\017PRI"
- + "ORITY_MEDIUM\020\002\022\021\n\rPRIORITY_HIGH\020\003:o\352Al\n\'"
- + "bigtableadmin.googleapis.com/AppProfile\022"
- + "Aprojects/{project}/instances/{instance}"
- + "/appProfiles/{app_profile}B\020\n\016routing_po"
- + "licyB\013\n\tisolation\"\210\003\n\tHotTablet\022\014\n\004name\030"
- + "\001 \001(\t\022;\n\ntable_name\030\002 \001(\tB\'\372A$\n\"bigtable"
- + "admin.googleapis.com/Table\0223\n\nstart_time"
- + "\030\003 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022"
- + "1\n\010end_time\030\004 \001(\0132\032.google.protobuf.Time"
- + "stampB\003\340A\003\022\021\n\tstart_key\030\005 \001(\t\022\017\n\007end_key"
- + "\030\006 \001(\t\022#\n\026node_cpu_usage_percent\030\007 \001(\002B\003"
- + "\340A\003:\177\352A|\n&bigtableadmin.googleapis.com/H"
- + "otTablet\022Rprojects/{project}/instances/{"
- + "instance}/clusters/{cluster}/hotTablets/"
- + "{hot_tablet}B\320\002\n\034com.google.bigtable.adm"
- + "in.v2B\rInstanceProtoP\001Z=google.golang.or"
- + "g/genproto/googleapis/bigtable/admin/v2;"
- + "admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002"
- + "\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002\"Google"
- + "::Cloud::Bigtable::Admin::V2\352Ax\n!cloudkm"
- + "s.googleapis.com/CryptoKey\022Sprojects/{pr"
- + "oject}/locations/{location}/keyRings/{ke"
- + "y_ring}/cryptoKeys/{crypto_key}b\006proto3"
+ + "\001\022i\n\036data_boost_isolation_read_only\030\n \001("
+ + "\0132?.google.bigtable.admin.v2.AppProfile."
+ + "DataBoostIsolationReadOnlyH\001\0320\n\031MultiClu"
+ + "sterRoutingUseAny\022\023\n\013cluster_ids\030\001 \003(\t\032N"
+ + "\n\024SingleClusterRouting\022\022\n\ncluster_id\030\001 \001"
+ + "(\t\022\"\n\032allow_transactional_writes\030\002 \001(\010\032T"
+ + "\n\021StandardIsolation\022?\n\010priority\030\001 \001(\0162-."
+ + "google.bigtable.admin.v2.AppProfile.Prio"
+ + "rity\032\374\001\n\032DataBoostIsolationReadOnly\022w\n\025c"
+ + "ompute_billing_owner\030\001 \001(\0162S.google.bigt"
+ + "able.admin.v2.AppProfile.DataBoostIsolat"
+ + "ionReadOnly.ComputeBillingOwnerH\000\210\001\001\"K\n\023"
+ + "ComputeBillingOwner\022%\n!COMPUTE_BILLING_O"
+ + "WNER_UNSPECIFIED\020\000\022\r\n\tHOST_PAYS\020\001B\030\n\026_co"
+ + "mpute_billing_owner\"^\n\010Priority\022\030\n\024PRIOR"
+ + "ITY_UNSPECIFIED\020\000\022\020\n\014PRIORITY_LOW\020\001\022\023\n\017P"
+ + "RIORITY_MEDIUM\020\002\022\021\n\rPRIORITY_HIGH\020\003:o\352Al"
+ + "\n\'bigtableadmin.googleapis.com/AppProfil"
+ + "e\022Aprojects/{project}/instances/{instanc"
+ + "e}/appProfiles/{app_profile}B\020\n\016routing_"
+ + "policyB\013\n\tisolation\"\210\003\n\tHotTablet\022\014\n\004nam"
+ + "e\030\001 \001(\t\022;\n\ntable_name\030\002 \001(\tB\'\372A$\n\"bigtab"
+ + "leadmin.googleapis.com/Table\0223\n\nstart_ti"
+ + "me\030\003 \001(\0132\032.google.protobuf.TimestampB\003\340A"
+ + "\003\0221\n\010end_time\030\004 \001(\0132\032.google.protobuf.Ti"
+ + "mestampB\003\340A\003\022\021\n\tstart_key\030\005 \001(\t\022\017\n\007end_k"
+ + "ey\030\006 \001(\t\022#\n\026node_cpu_usage_percent\030\007 \001(\002"
+ + "B\003\340A\003:\177\352A|\n&bigtableadmin.googleapis.com"
+ + "/HotTablet\022Rprojects/{project}/instances"
+ + "/{instance}/clusters/{cluster}/hotTablet"
+ + "s/{hot_tablet}B\320\002\n\034com.google.bigtable.a"
+ + "dmin.v2B\rInstanceProtoP\001Z=google.golang."
+ + "org/genproto/googleapis/bigtable/admin/v"
+ + "2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2"
+ + "\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002\"Goog"
+ + "le::Cloud::Bigtable::Admin::V2\352Ax\n!cloud"
+ + "kms.googleapis.com/CryptoKey\022Sprojects/{"
+ + "project}/locations/{location}/keyRings/{"
+ + "key_ring}/cryptoKeys/{crypto_key}b\006proto"
+ + "3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
@@ -272,6 +286,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"SingleClusterRouting",
"Priority",
"StandardIsolation",
+ "DataBoostIsolationReadOnly",
"RoutingPolicy",
"Isolation",
});
@@ -299,6 +314,14 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
new java.lang.String[] {
"Priority",
});
+ internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_descriptor =
+ internal_static_google_bigtable_admin_v2_AppProfile_descriptor.getNestedTypes().get(3);
+ internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_fieldAccessorTable =
+ new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
+ internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_descriptor,
+ new java.lang.String[] {
+ "ComputeBillingOwner",
+ });
internal_static_google_bigtable_admin_v2_HotTablet_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_google_bigtable_admin_v2_HotTablet_fieldAccessorTable =
diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/StandardReadRemoteWrites.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/StandardReadRemoteWrites.java
new file mode 100644
index 0000000000..6546f5e47c
--- /dev/null
+++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/StandardReadRemoteWrites.java
@@ -0,0 +1,435 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/bigtable/admin/v2/bigtable_table_admin.proto
+
+// Protobuf Java Version: 3.25.2
+package com.google.bigtable.admin.v2;
+
+/**
+ *
+ *
+ *
+ * Checks that all writes before the consistency token was generated are
+ * replicated in every cluster and readable.
+ *
+ *
+ * Protobuf type {@code google.bigtable.admin.v2.StandardReadRemoteWrites}
+ */
+public final class StandardReadRemoteWrites extends com.google.protobuf.GeneratedMessageV3
+ implements
+ // @@protoc_insertion_point(message_implements:google.bigtable.admin.v2.StandardReadRemoteWrites)
+ StandardReadRemoteWritesOrBuilder {
+ private static final long serialVersionUID = 0L;
+ // Use StandardReadRemoteWrites.newBuilder() to construct.
+ private StandardReadRemoteWrites(com.google.protobuf.GeneratedMessageV3.Builder> builder) {
+ super(builder);
+ }
+
+ private StandardReadRemoteWrites() {}
+
+ @java.lang.Override
+ @SuppressWarnings({"unused"})
+ protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
+ return new StandardReadRemoteWrites();
+ }
+
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.bigtable.admin.v2.BigtableTableAdminProto
+ .internal_static_google_bigtable_admin_v2_StandardReadRemoteWrites_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.bigtable.admin.v2.BigtableTableAdminProto
+ .internal_static_google_bigtable_admin_v2_StandardReadRemoteWrites_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites.class,
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites.Builder.class);
+ }
+
+ private byte memoizedIsInitialized = -1;
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ byte isInitialized = memoizedIsInitialized;
+ if (isInitialized == 1) return true;
+ if (isInitialized == 0) return false;
+
+ memoizedIsInitialized = 1;
+ return true;
+ }
+
+ @java.lang.Override
+ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
+ getUnknownFields().writeTo(output);
+ }
+
+ @java.lang.Override
+ public int getSerializedSize() {
+ int size = memoizedSize;
+ if (size != -1) return size;
+
+ size = 0;
+ size += getUnknownFields().getSerializedSize();
+ memoizedSize = size;
+ return size;
+ }
+
+ @java.lang.Override
+ public boolean equals(final java.lang.Object obj) {
+ if (obj == this) {
+ return true;
+ }
+ if (!(obj instanceof com.google.bigtable.admin.v2.StandardReadRemoteWrites)) {
+ return super.equals(obj);
+ }
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites other =
+ (com.google.bigtable.admin.v2.StandardReadRemoteWrites) obj;
+
+ if (!getUnknownFields().equals(other.getUnknownFields())) return false;
+ return true;
+ }
+
+ @java.lang.Override
+ public int hashCode() {
+ if (memoizedHashCode != 0) {
+ return memoizedHashCode;
+ }
+ int hash = 41;
+ hash = (19 * hash) + getDescriptor().hashCode();
+ hash = (29 * hash) + getUnknownFields().hashCode();
+ memoizedHashCode = hash;
+ return hash;
+ }
+
+ public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom(
+ java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom(
+ java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom(
+ com.google.protobuf.ByteString data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom(
+ com.google.protobuf.ByteString data,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom(byte[] data)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data);
+ }
+
+ public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom(
+ byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ return PARSER.parseFrom(data, extensionRegistry);
+ }
+
+ public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseDelimitedFrom(
+ java.io.InputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
+ }
+
+ public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseDelimitedFrom(
+ java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom(
+ com.google.protobuf.CodedInputStream input) throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
+ }
+
+ public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
+ PARSER, input, extensionRegistry);
+ }
+
+ @java.lang.Override
+ public Builder newBuilderForType() {
+ return newBuilder();
+ }
+
+ public static Builder newBuilder() {
+ return DEFAULT_INSTANCE.toBuilder();
+ }
+
+ public static Builder newBuilder(
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites prototype) {
+ return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
+ }
+
+ @java.lang.Override
+ public Builder toBuilder() {
+ return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
+ }
+
+ @java.lang.Override
+ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ Builder builder = new Builder(parent);
+ return builder;
+ }
+ /**
+ *
+ *
+ *
+ * Checks that all writes before the consistency token was generated are
+ * replicated in every cluster and readable.
+ *
+ *
+ * Protobuf type {@code google.bigtable.admin.v2.StandardReadRemoteWrites}
+ */
+ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder
+ implements
+ // @@protoc_insertion_point(builder_implements:google.bigtable.admin.v2.StandardReadRemoteWrites)
+ com.google.bigtable.admin.v2.StandardReadRemoteWritesOrBuilder {
+ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
+ return com.google.bigtable.admin.v2.BigtableTableAdminProto
+ .internal_static_google_bigtable_admin_v2_StandardReadRemoteWrites_descriptor;
+ }
+
+ @java.lang.Override
+ protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
+ internalGetFieldAccessorTable() {
+ return com.google.bigtable.admin.v2.BigtableTableAdminProto
+ .internal_static_google_bigtable_admin_v2_StandardReadRemoteWrites_fieldAccessorTable
+ .ensureFieldAccessorsInitialized(
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites.class,
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites.Builder.class);
+ }
+
+ // Construct using com.google.bigtable.admin.v2.StandardReadRemoteWrites.newBuilder()
+ private Builder() {}
+
+ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
+ super(parent);
+ }
+
+ @java.lang.Override
+ public Builder clear() {
+ super.clear();
+ return this;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
+ return com.google.bigtable.admin.v2.BigtableTableAdminProto
+ .internal_static_google_bigtable_admin_v2_StandardReadRemoteWrites_descriptor;
+ }
+
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.StandardReadRemoteWrites getDefaultInstanceForType() {
+ return com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance();
+ }
+
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.StandardReadRemoteWrites build() {
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites result = buildPartial();
+ if (!result.isInitialized()) {
+ throw newUninitializedMessageException(result);
+ }
+ return result;
+ }
+
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.StandardReadRemoteWrites buildPartial() {
+ com.google.bigtable.admin.v2.StandardReadRemoteWrites result =
+ new com.google.bigtable.admin.v2.StandardReadRemoteWrites(this);
+ onBuilt();
+ return result;
+ }
+
+ @java.lang.Override
+ public Builder clone() {
+ return super.clone();
+ }
+
+ @java.lang.Override
+ public Builder setField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.setField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
+ return super.clearField(field);
+ }
+
+ @java.lang.Override
+ public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
+ return super.clearOneof(oneof);
+ }
+
+ @java.lang.Override
+ public Builder setRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) {
+ return super.setRepeatedField(field, index, value);
+ }
+
+ @java.lang.Override
+ public Builder addRepeatedField(
+ com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
+ return super.addRepeatedField(field, value);
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(com.google.protobuf.Message other) {
+ if (other instanceof com.google.bigtable.admin.v2.StandardReadRemoteWrites) {
+ return mergeFrom((com.google.bigtable.admin.v2.StandardReadRemoteWrites) other);
+ } else {
+ super.mergeFrom(other);
+ return this;
+ }
+ }
+
+ public Builder mergeFrom(com.google.bigtable.admin.v2.StandardReadRemoteWrites other) {
+ if (other == com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance())
+ return this;
+ this.mergeUnknownFields(other.getUnknownFields());
+ onChanged();
+ return this;
+ }
+
+ @java.lang.Override
+ public final boolean isInitialized() {
+ return true;
+ }
+
+ @java.lang.Override
+ public Builder mergeFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws java.io.IOException {
+ if (extensionRegistry == null) {
+ throw new java.lang.NullPointerException();
+ }
+ try {
+ boolean done = false;
+ while (!done) {
+ int tag = input.readTag();
+ switch (tag) {
+ case 0:
+ done = true;
+ break;
+ default:
+ {
+ if (!super.parseUnknownField(input, extensionRegistry, tag)) {
+ done = true; // was an endgroup tag
+ }
+ break;
+ } // default:
+ } // switch (tag)
+ } // while (!done)
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.unwrapIOException();
+ } finally {
+ onChanged();
+ } // finally
+ return this;
+ }
+
+ @java.lang.Override
+ public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.setUnknownFields(unknownFields);
+ }
+
+ @java.lang.Override
+ public final Builder mergeUnknownFields(
+ final com.google.protobuf.UnknownFieldSet unknownFields) {
+ return super.mergeUnknownFields(unknownFields);
+ }
+
+ // @@protoc_insertion_point(builder_scope:google.bigtable.admin.v2.StandardReadRemoteWrites)
+ }
+
+ // @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.StandardReadRemoteWrites)
+ private static final com.google.bigtable.admin.v2.StandardReadRemoteWrites DEFAULT_INSTANCE;
+
+ static {
+ DEFAULT_INSTANCE = new com.google.bigtable.admin.v2.StandardReadRemoteWrites();
+ }
+
+ public static com.google.bigtable.admin.v2.StandardReadRemoteWrites getDefaultInstance() {
+ return DEFAULT_INSTANCE;
+ }
+
+ private static final com.google.protobuf.Parser PARSER =
+ new com.google.protobuf.AbstractParser() {
+ @java.lang.Override
+ public StandardReadRemoteWrites parsePartialFrom(
+ com.google.protobuf.CodedInputStream input,
+ com.google.protobuf.ExtensionRegistryLite extensionRegistry)
+ throws com.google.protobuf.InvalidProtocolBufferException {
+ Builder builder = newBuilder();
+ try {
+ builder.mergeFrom(input, extensionRegistry);
+ } catch (com.google.protobuf.InvalidProtocolBufferException e) {
+ throw e.setUnfinishedMessage(builder.buildPartial());
+ } catch (com.google.protobuf.UninitializedMessageException e) {
+ throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial());
+ } catch (java.io.IOException e) {
+ throw new com.google.protobuf.InvalidProtocolBufferException(e)
+ .setUnfinishedMessage(builder.buildPartial());
+ }
+ return builder.buildPartial();
+ }
+ };
+
+ public static com.google.protobuf.Parser parser() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.protobuf.Parser getParserForType() {
+ return PARSER;
+ }
+
+ @java.lang.Override
+ public com.google.bigtable.admin.v2.StandardReadRemoteWrites getDefaultInstanceForType() {
+ return DEFAULT_INSTANCE;
+ }
+}
diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/StandardReadRemoteWritesOrBuilder.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/StandardReadRemoteWritesOrBuilder.java
new file mode 100644
index 0000000000..aff7f459f3
--- /dev/null
+++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/StandardReadRemoteWritesOrBuilder.java
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2024 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * https://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+// Generated by the protocol buffer compiler. DO NOT EDIT!
+// source: google/bigtable/admin/v2/bigtable_table_admin.proto
+
+// Protobuf Java Version: 3.25.2
+package com.google.bigtable.admin.v2;
+
+public interface StandardReadRemoteWritesOrBuilder
+ extends
+ // @@protoc_insertion_point(interface_extends:google.bigtable.admin.v2.StandardReadRemoteWrites)
+ com.google.protobuf.MessageOrBuilder {}
diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/bigtable_table_admin.proto b/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/bigtable_table_admin.proto
index 9d5a38b073..9fe63a2738 100644
--- a/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/bigtable_table_admin.proto
+++ b/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/bigtable_table_admin.proto
@@ -836,8 +836,30 @@ message CheckConsistencyRequest {
// Required. The token created using GenerateConsistencyToken for the Table.
string consistency_token = 2 [(google.api.field_behavior) = REQUIRED];
+
+ // Which type of read needs to consistently observe which type of write?
+ // Default: `standard_read_remote_writes`
+ oneof mode {
+ // Checks that reads using an app profile with `StandardIsolation` can
+ // see all writes committed before the token was created, even if the
+ // read and write target different clusters.
+ StandardReadRemoteWrites standard_read_remote_writes = 3;
+
+ // Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+ // can see all writes committed before the token was created, but only if
+ // the read and write target the same cluster.
+ DataBoostReadLocalWrites data_boost_read_local_writes = 4;
+ }
}
+// Checks that all writes before the consistency token was generated are
+// replicated in every cluster and readable.
+message StandardReadRemoteWrites {}
+
+// Checks that all writes before the consistency token was generated in the same
+// cluster are readable by Databoost.
+message DataBoostReadLocalWrites {}
+
// Response message for
// [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency]
message CheckConsistencyResponse {
diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/instance.proto b/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/instance.proto
index 950d9f4880..fc7c2b7c93 100644
--- a/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/instance.proto
+++ b/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/instance.proto
@@ -297,6 +297,35 @@ message AppProfile {
Priority priority = 1;
}
+ // Data Boost is a serverless compute capability that lets you run
+ // high-throughput read jobs on your Bigtable data, without impacting the
+ // performance of the clusters that handle your application traffic.
+ // Currently, Data Boost exclusively supports read-only use-cases with
+ // single-cluster routing.
+ //
+ // Data Boost reads are only guaranteed to see the results of writes that
+ // were written at least 30 minutes ago. This means newly written values may
+ // not become visible for up to 30m, and also means that old values may
+ // remain visible for up to 30m after being deleted or overwritten. To
+ // mitigate the staleness of the data, users may either wait 30m, or use
+ // CheckConsistency.
+ message DataBoostIsolationReadOnly {
+ // Compute Billing Owner specifies how usage should be accounted when using
+ // Data Boost. Compute Billing Owner also configures which Cloud Project is
+ // charged for relevant quota.
+ enum ComputeBillingOwner {
+ // Unspecified value.
+ COMPUTE_BILLING_OWNER_UNSPECIFIED = 0;
+
+ // The host Cloud Project containing the targeted Bigtable Instance /
+ // Table pays for compute.
+ HOST_PAYS = 1;
+ }
+
+ // The Compute Billing Owner for this Data Boost App Profile.
+ optional ComputeBillingOwner compute_billing_owner = 1;
+ }
+
// The unique name of the app profile. Values are of the form
// `projects/{project}/instances/{instance}/appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`.
string name = 1;
@@ -335,6 +364,10 @@ message AppProfile {
// The standard options used for isolating this app profile's traffic from
// other use cases.
StandardIsolation standard_isolation = 11;
+
+ // Specifies that this app profile is intended for read-only usage via the
+ // Data Boost feature.
+ DataBoostIsolationReadOnly data_boost_isolation_read_only = 10;
}
}
diff --git a/proto-google-cloud-bigtable-v2/pom.xml b/proto-google-cloud-bigtable-v2/pom.xml
index 0a30e75756..bd8e708c13 100644
--- a/proto-google-cloud-bigtable-v2/pom.xml
+++ b/proto-google-cloud-bigtable-v2/pom.xml
@@ -4,13 +4,13 @@
4.0.0
com.google.api.grpc
proto-google-cloud-bigtable-v2
- 2.37.0
+ 2.38.0
proto-google-cloud-bigtable-v2
PROTO library for proto-google-cloud-bigtable-v2
com.google.cloud
google-cloud-bigtable-parent
- 2.37.0
+ 2.38.0
@@ -18,14 +18,14 @@
com.google.cloud
google-cloud-bigtable-deps-bom
- 2.37.0
+ 2.38.0
pom
import
com.google.cloud
google-cloud-bigtable-bom
- 2.37.0
+ 2.38.0
pom
import
diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlags.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlags.java
index 8ec9e22fa2..aa9f34a669 100644
--- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlags.java
+++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlags.java
@@ -184,6 +184,24 @@ public boolean getRetryInfo() {
return retryInfo_;
}
+ public static final int CLIENT_SIDE_METRICS_ENABLED_FIELD_NUMBER = 8;
+ private boolean clientSideMetricsEnabled_ = false;
+ /**
+ *
+ *
+ *
+ * Notify the server that the client has client side metrics enabled.
+ *
+ *
+ * bool client_side_metrics_enabled = 8;
+ *
+ * @return The clientSideMetricsEnabled.
+ */
+ @java.lang.Override
+ public boolean getClientSideMetricsEnabled() {
+ return clientSideMetricsEnabled_;
+ }
+
private byte memoizedIsInitialized = -1;
@java.lang.Override
@@ -216,6 +234,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
if (retryInfo_ != false) {
output.writeBool(7, retryInfo_);
}
+ if (clientSideMetricsEnabled_ != false) {
+ output.writeBool(8, clientSideMetricsEnabled_);
+ }
getUnknownFields().writeTo(output);
}
@@ -243,6 +264,9 @@ public int getSerializedSize() {
if (retryInfo_ != false) {
size += com.google.protobuf.CodedOutputStream.computeBoolSize(7, retryInfo_);
}
+ if (clientSideMetricsEnabled_ != false) {
+ size += com.google.protobuf.CodedOutputStream.computeBoolSize(8, clientSideMetricsEnabled_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
@@ -264,6 +288,7 @@ public boolean equals(final java.lang.Object obj) {
if (getLastScannedRowResponses() != other.getLastScannedRowResponses()) return false;
if (getRoutingCookie() != other.getRoutingCookie()) return false;
if (getRetryInfo() != other.getRetryInfo()) return false;
+ if (getClientSideMetricsEnabled() != other.getClientSideMetricsEnabled()) return false;
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@@ -287,6 +312,8 @@ public int hashCode() {
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getRoutingCookie());
hash = (37 * hash) + RETRY_INFO_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getRetryInfo());
+ hash = (37 * hash) + CLIENT_SIDE_METRICS_ENABLED_FIELD_NUMBER;
+ hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getClientSideMetricsEnabled());
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -438,6 +465,7 @@ public Builder clear() {
lastScannedRowResponses_ = false;
routingCookie_ = false;
retryInfo_ = false;
+ clientSideMetricsEnabled_ = false;
return this;
}
@@ -491,6 +519,9 @@ private void buildPartial0(com.google.bigtable.v2.FeatureFlags result) {
if (((from_bitField0_ & 0x00000020) != 0)) {
result.retryInfo_ = retryInfo_;
}
+ if (((from_bitField0_ & 0x00000040) != 0)) {
+ result.clientSideMetricsEnabled_ = clientSideMetricsEnabled_;
+ }
}
@java.lang.Override
@@ -556,6 +587,9 @@ public Builder mergeFrom(com.google.bigtable.v2.FeatureFlags other) {
if (other.getRetryInfo() != false) {
setRetryInfo(other.getRetryInfo());
}
+ if (other.getClientSideMetricsEnabled() != false) {
+ setClientSideMetricsEnabled(other.getClientSideMetricsEnabled());
+ }
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
@@ -618,6 +652,12 @@ public Builder mergeFrom(
bitField0_ |= 0x00000020;
break;
} // case 56
+ case 64:
+ {
+ clientSideMetricsEnabled_ = input.readBool();
+ bitField0_ |= 0x00000040;
+ break;
+ } // case 64
default:
{
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
@@ -979,6 +1019,59 @@ public Builder clearRetryInfo() {
return this;
}
+ private boolean clientSideMetricsEnabled_;
+ /**
+ *
+ *
+ *
+ * Notify the server that the client has client side metrics enabled.
+ *
+ *
+ * bool client_side_metrics_enabled = 8;
+ *
+ * @return The clientSideMetricsEnabled.
+ */
+ @java.lang.Override
+ public boolean getClientSideMetricsEnabled() {
+ return clientSideMetricsEnabled_;
+ }
+ /**
+ *
+ *
+ *
+ * Notify the server that the client has client side metrics enabled.
+ *
+ *
+ * bool client_side_metrics_enabled = 8;
+ *
+ * @param value The clientSideMetricsEnabled to set.
+ * @return This builder for chaining.
+ */
+ public Builder setClientSideMetricsEnabled(boolean value) {
+
+ clientSideMetricsEnabled_ = value;
+ bitField0_ |= 0x00000040;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * Notify the server that the client has client side metrics enabled.
+ *
+ *
+ * bool client_side_metrics_enabled = 8;
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearClientSideMetricsEnabled() {
+ bitField0_ = (bitField0_ & ~0x00000040);
+ clientSideMetricsEnabled_ = false;
+ onChanged();
+ return this;
+ }
+
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsOrBuilder.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsOrBuilder.java
index 4e5e3c3f6a..f58d5c96d8 100644
--- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsOrBuilder.java
+++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsOrBuilder.java
@@ -109,4 +109,17 @@ public interface FeatureFlagsOrBuilder
* @return The retryInfo.
*/
boolean getRetryInfo();
+
+ /**
+ *
+ *
+ *
+ * Notify the server that the client has client side metrics enabled.
+ *
+ *
+ * bool client_side_metrics_enabled = 8;
+ *
+ * @return The clientSideMetricsEnabled.
+ */
+ boolean getClientSideMetricsEnabled();
}
diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsProto.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsProto.java
index 0bd27e36b5..30301d352a 100644
--- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsProto.java
+++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsProto.java
@@ -42,17 +42,18 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
static {
java.lang.String[] descriptorData = {
"\n&google/bigtable/v2/feature_flags.proto"
- + "\022\022google.bigtable.v2\"\266\001\n\014FeatureFlags\022\025\n"
+ + "\022\022google.bigtable.v2\"\333\001\n\014FeatureFlags\022\025\n"
+ "\rreverse_scans\030\001 \001(\010\022\036\n\026mutate_rows_rate"
+ "_limit\030\003 \001(\010\022\037\n\027mutate_rows_rate_limit2\030"
+ "\005 \001(\010\022\"\n\032last_scanned_row_responses\030\004 \001("
+ "\010\022\026\n\016routing_cookie\030\006 \001(\010\022\022\n\nretry_info\030"
- + "\007 \001(\010B\275\001\n\026com.google.bigtable.v2B\021Featur"
- + "eFlagsProtoP\001Z:google.golang.org/genprot"
- + "o/googleapis/bigtable/v2;bigtable\252\002\030Goog"
- + "le.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigt"
- + "able\\V2\352\002\033Google::Cloud::Bigtable::V2b\006p"
- + "roto3"
+ + "\007 \001(\010\022#\n\033client_side_metrics_enabled\030\010 \001"
+ + "(\010B\275\001\n\026com.google.bigtable.v2B\021FeatureFl"
+ + "agsProtoP\001Z:google.golang.org/genproto/g"
+ + "oogleapis/bigtable/v2;bigtable\252\002\030Google."
+ + "Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtabl"
+ + "e\\V2\352\002\033Google::Cloud::Bigtable::V2b\006prot"
+ + "o3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
@@ -69,6 +70,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"LastScannedRowResponses",
"RoutingCookie",
"RetryInfo",
+ "ClientSideMetricsEnabled",
});
}
diff --git a/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/feature_flags.proto b/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/feature_flags.proto
index d3128c5c67..bfce3180fe 100644
--- a/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/feature_flags.proto
+++ b/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/feature_flags.proto
@@ -58,4 +58,7 @@ message FeatureFlags {
// Notify the server that the client supports using retry info back off
// durations to retry requests with.
bool retry_info = 7;
+
+ // Notify the server that the client has client side metrics enabled.
+ bool client_side_metrics_enabled = 8;
}
diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml
index 48f9dd3756..473ddf3d26 100644
--- a/samples/install-without-bom/pom.xml
+++ b/samples/install-without-bom/pom.xml
@@ -25,13 +25,15 @@
+
com.google.cloud
google-cloud-bigtable
- 2.36.0
+ 2.38.0
+
junit
diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml
index 60c3dd1b29..07dede2ada 100644
--- a/samples/snapshot/pom.xml
+++ b/samples/snapshot/pom.xml
@@ -28,7 +28,7 @@
com.google.cloud
google-cloud-bigtable
- 2.37.0
+ 2.38.0
diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml
index 5a040c9b8c..807f1cecd9 100644
--- a/samples/snippets/pom.xml
+++ b/samples/snippets/pom.xml
@@ -23,14 +23,13 @@
UTF-8
-
com.google.cloud
libraries-bom
- 26.25.0
+ 26.37.0
pom
import
diff --git a/samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java b/samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java
new file mode 100644
index 0000000000..8f3047442b
--- /dev/null
+++ b/samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java
@@ -0,0 +1,322 @@
+/*
+ * Copyright 2024 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.bigtable;
+
+import static com.google.cloud.bigtable.data.v2.models.Filters.FILTERS;
+
+import com.google.api.gax.rpc.NotFoundException;
+import com.google.api.gax.rpc.PermissionDeniedException;
+import com.google.api.gax.rpc.ServerStream;
+import com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient;
+import com.google.cloud.bigtable.admin.v2.BigtableTableAdminSettings;
+import com.google.cloud.bigtable.admin.v2.models.AuthorizedView;
+import com.google.cloud.bigtable.admin.v2.models.CreateAuthorizedViewRequest;
+import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest;
+import com.google.cloud.bigtable.admin.v2.models.FamilySubsets;
+import com.google.cloud.bigtable.admin.v2.models.SubsetView;
+import com.google.cloud.bigtable.admin.v2.models.Table;
+import com.google.cloud.bigtable.admin.v2.models.UpdateAuthorizedViewRequest;
+import com.google.cloud.bigtable.data.v2.BigtableDataClient;
+import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
+import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId;
+import com.google.cloud.bigtable.data.v2.models.Filters.Filter;
+import com.google.cloud.bigtable.data.v2.models.Query;
+import com.google.cloud.bigtable.data.v2.models.Row;
+import com.google.cloud.bigtable.data.v2.models.RowCell;
+import com.google.cloud.bigtable.data.v2.models.RowMutation;
+import com.google.protobuf.ByteString;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+public class AuthorizedViewExample {
+
+ private static final String COLUMN_FAMILY = "cf";
+ private static final String COLUMN_QUALIFIER_GREETING = "greeting";
+ private static final String COLUMN_QUALIFIER_NAME = "name";
+ private static final String ROW_KEY_PREFIX = "rowKey";
+ private final String tableId;
+ private final String authorizedViewId;
+ private final BigtableTableAdminClient adminClient;
+ private final BigtableDataClient dataClient;
+
+ public static void main(String[] args) throws IOException {
+
+ if (args.length != 2) {
+ System.out.println("Missing required project id or instance id");
+ return;
+ }
+ String projectId = args[0];
+ String instanceId = args[1];
+
+ AuthorizedViewExample authorizedViewExample =
+ new AuthorizedViewExample(projectId, instanceId, "test-table", "test-authorized-view");
+ authorizedViewExample.run();
+ }
+
+ public AuthorizedViewExample(
+ String projectId, String instanceId, String tableId, String authorizedViewId)
+ throws IOException {
+ this.tableId = tableId;
+ this.authorizedViewId = authorizedViewId;
+
+ // Creates the settings to configure a bigtable data client.
+ BigtableDataSettings settings =
+ BigtableDataSettings.newBuilder().setProjectId(projectId).setInstanceId(instanceId).build();
+
+ // Creates a bigtable data client.
+ dataClient = BigtableDataClient.create(settings);
+
+ // Creates the settings to configure a bigtable table admin client.
+ BigtableTableAdminSettings adminSettings =
+ BigtableTableAdminSettings.newBuilder()
+ .setProjectId(projectId)
+ .setInstanceId(instanceId)
+ .build();
+
+ // Creates a bigtable table admin client.
+ adminClient = BigtableTableAdminClient.create(adminSettings);
+ }
+
+ public void close() {
+ dataClient.close();
+ adminClient.close();
+ }
+
+ public void run() {
+ createTable();
+ createAuthorizedView();
+ updateAuthorizedView();
+ getAuthorizedView();
+ listAllAuthorizedViews();
+ writeToAuthorizedView();
+ readSingleRowFromAuthorizedView();
+ readRowsWithFilterFromAuthorizedView();
+ deleteAuthorizedView();
+ deleteTable();
+ close();
+ }
+
+ public void createTable() {
+ // Checks if table exists, creates table if it does not exist.
+ if (!adminClient.exists(tableId)) {
+ System.out.println("Table does not exist, creating table: " + tableId);
+ CreateTableRequest createTableRequest =
+ CreateTableRequest.of(tableId).addFamily(COLUMN_FAMILY);
+ Table table = adminClient.createTable(createTableRequest);
+ System.out.printf("Table: %s created successfully%n", table.getId());
+ }
+ }
+
+ public void deleteTable() {
+ // Deletes the entire table.
+ System.out.println("\nDelete table: " + tableId);
+ try {
+ adminClient.deleteTable(tableId);
+ System.out.printf("Table: %s deleted successfully%n", tableId);
+ } catch (NotFoundException e) {
+ System.err.println("Failed to delete a non-existent table: " + e.getMessage());
+ }
+ }
+
+ /**
+ * Demonstrates how to create an authorized view under a table with the specified configuration.
+ */
+ public void createAuthorizedView() {
+ // Checks if the authorized view exists, creates it if it does not exist.
+ try {
+ adminClient.getAuthorizedView(tableId, authorizedViewId);
+ } catch (NotFoundException exception) {
+ System.out.printf("%nCreating authorized view %s in table %s%n", authorizedViewId, tableId);
+ // [START bigtable_create_authorized_view]
+ try {
+ CreateAuthorizedViewRequest request =
+ CreateAuthorizedViewRequest.of(tableId, authorizedViewId)
+ .setAuthorizedViewType(
+ SubsetView.create()
+ .addRowPrefix("")
+ .setFamilySubsets(
+ COLUMN_FAMILY,
+ FamilySubsets.create().addQualifierPrefix(COLUMN_QUALIFIER_NAME)));
+ AuthorizedView authorizedView = adminClient.createAuthorizedView(request);
+ System.out.printf("AuthorizedView: %s created successfully%n", authorizedView.getId());
+ } catch (NotFoundException e) {
+ System.err.println(
+ "Failed to create an authorized view from a non-existent table: " + e.getMessage());
+ }
+ // [END bigtable_create_authorized_view]
+ }
+ }
+
+ /** Demonstrates how to modify an authorized view. */
+ public void updateAuthorizedView() {
+ System.out.printf("%nUpdating authorized view %s in table %s%n", authorizedViewId, tableId);
+ // [START bigtable_update_authorized_view]
+ try {
+ // Update to an authorized view permitting everything.
+ UpdateAuthorizedViewRequest request =
+ UpdateAuthorizedViewRequest.of(tableId, authorizedViewId)
+ .setAuthorizedViewType(
+ SubsetView.create()
+ .addRowPrefix("")
+ .setFamilySubsets(
+ COLUMN_FAMILY, FamilySubsets.create().addQualifierPrefix("")));
+ AuthorizedView authorizedView = adminClient.updateAuthorizedView(request);
+ System.out.printf("AuthorizedView: %s updated successfully%n", authorizedView.getId());
+ } catch (NotFoundException e) {
+ System.err.println("Failed to modify a non-existent authorized view: " + e.getMessage());
+ }
+ // [END bigtable_update_authorized_view]
+ }
+
+ /** Demonstrates how to get an authorized view's metadata. */
+ public AuthorizedView getAuthorizedView() {
+ System.out.printf("%nGetting authorized view %s in table %s%n", authorizedViewId, tableId);
+ // [START bigtable_get_authorized_view]
+ AuthorizedView authorizedView = null;
+ try {
+ authorizedView = adminClient.getAuthorizedView(tableId, authorizedViewId);
+ SubsetView subsetView = (SubsetView) authorizedView.getAuthorizedViewType();
+
+ for (ByteString rowPrefix : subsetView.getRowPrefixes()) {
+ System.out.printf("Row Prefix: %s%n", rowPrefix.toStringUtf8());
+ }
+ for (Map.Entry entry : subsetView.getFamilySubsets().entrySet()) {
+ for (ByteString qualifierPrefix : entry.getValue().getQualifierPrefixes()) {
+ System.out.printf(
+ "Column Family: %s, Qualifier Prefix: %s%n",
+ entry.getKey(), qualifierPrefix.toStringUtf8());
+ }
+ for (ByteString qualifier : entry.getValue().getQualifiers()) {
+ System.out.printf(
+ "Column Family: %s, Qualifier: %s%n", entry.getKey(), qualifier.toStringUtf8());
+ }
+ }
+ } catch (NotFoundException e) {
+ System.err.println(
+ "Failed to retrieve metadata from a non-existent authorized view: " + e.getMessage());
+ }
+ // [END bigtable_get_authorized_view]
+ return authorizedView;
+ }
+
+ /** Demonstrates how to list all authorized views within a table. */
+ public List listAllAuthorizedViews() {
+ System.out.printf("%nListing authorized views in table %s%n", tableId);
+ // [START bigtable_list_authorized_views]
+ List authorizedViewIds = new ArrayList<>();
+ try {
+ authorizedViewIds = adminClient.listAuthorizedViews(tableId);
+ for (String authorizedViewId : authorizedViewIds) {
+ System.out.println(authorizedViewId);
+ }
+ } catch (NotFoundException e) {
+ System.err.println(
+ "Failed to list authorized views from a non-existent table: " + e.getMessage());
+ }
+ // [END bigtable_list_authorized_views]
+ return authorizedViewIds;
+ }
+
+ /** Demonstrates how to delete an authorized view. */
+ public void deleteAuthorizedView() {
+ System.out.printf("%nDeleting authorized view %s in table %s%n", authorizedViewId, tableId);
+ // [START bigtable_delete_authorized_view]
+ try {
+ adminClient.deleteAuthorizedView(tableId, authorizedViewId);
+ System.out.printf("AuthorizedView: %s deleted successfully%n", authorizedViewId);
+ } catch (NotFoundException e) {
+ System.err.println("Failed to delete a non-existent authorized view: " + e.getMessage());
+ }
+ // [END bigtable_delete_authorized_view]
+ }
+
+ /** Demonstrates how to write some rows to an authorized view. */
+ public void writeToAuthorizedView() {
+ // [START bigtable_authorized_view_write_rows]
+ try {
+ System.out.println("\nWriting to authorized view");
+ String[] names = {"World", "Bigtable", "Java"};
+ for (int i = 0; i < names.length; i++) {
+ String greeting = "Hello " + names[i] + "!";
+ RowMutation rowMutation =
+ RowMutation.create(AuthorizedViewId.of(tableId, authorizedViewId), ROW_KEY_PREFIX + i)
+ .setCell(COLUMN_FAMILY, COLUMN_QUALIFIER_NAME, names[i])
+ .setCell(COLUMN_FAMILY, COLUMN_QUALIFIER_GREETING, greeting);
+ dataClient.mutateRow(rowMutation);
+ System.out.println(greeting);
+ }
+ } catch (Exception e) {
+ if (e instanceof NotFoundException) {
+ System.err.println("Failed to write to non-existent authorized view: " + e.getMessage());
+ } else if (e instanceof PermissionDeniedException) {
+ System.err.println(
+ "Failed to apply mutations outside of the authorized view: " + e.getMessage());
+ }
+ }
+ // [END bigtable_authorized_view_write_rows]
+ }
+
+ /** Demonstrates how to read a single row from an authorized view. */
+ public Row readSingleRowFromAuthorizedView() {
+ // [START bigtable_authorized_view_get_by_key]
+ try {
+ System.out.println("\nReading a single row by row key from an authorized view");
+ Row row =
+ dataClient.readRow(AuthorizedViewId.of(tableId, authorizedViewId), ROW_KEY_PREFIX + 0);
+ System.out.println("Row: " + row.getKey().toStringUtf8());
+ for (RowCell cell : row.getCells()) {
+ System.out.printf(
+ "Family: %s Qualifier: %s Value: %s%n",
+ cell.getFamily(), cell.getQualifier().toStringUtf8(), cell.getValue().toStringUtf8());
+ }
+ return row;
+ } catch (NotFoundException e) {
+ System.err.println("Failed to read from a non-existent authorized view: " + e.getMessage());
+ return null;
+ }
+ // [END bigtable_authorized_view_get_by_key]
+ }
+
+ /** Demonstrates how to read rows from an authorized view with a filter. */
+ public List readRowsWithFilterFromAuthorizedView() {
+ // [START bigtable_authorized_view_scan_with_filter]
+ try {
+ // A filter that matches only the most recent cell within each column
+ Filter filter = FILTERS.limit().cellsPerColumn(1);
+ System.out.println("\nScanning authorized view with filter");
+ Query query = Query.create(AuthorizedViewId.of(tableId, authorizedViewId)).filter(filter);
+ ServerStream rowStream = dataClient.readRows(query);
+ List authorizedViewRows = new ArrayList<>();
+ for (Row r : rowStream) {
+ System.out.println("Row Key: " + r.getKey().toStringUtf8());
+ authorizedViewRows.add(r);
+ for (RowCell cell : r.getCells()) {
+ System.out.printf(
+ "Family: %s Qualifier: %s Value: %s%n",
+ cell.getFamily(), cell.getQualifier().toStringUtf8(), cell.getValue().toStringUtf8());
+ }
+ }
+ return authorizedViewRows;
+ } catch (NotFoundException e) {
+ System.err.println("Failed to read a non-existent authorized view: " + e.getMessage());
+ return null;
+ }
+ // [END bigtable_authorized_view_scan_with_filter]
+ }
+}
diff --git a/samples/snippets/src/main/java/com/example/bigtable/Filters.java b/samples/snippets/src/main/java/com/example/bigtable/Filters.java
index 54f3282573..c27437da58 100644
--- a/samples/snippets/src/main/java/com/example/bigtable/Filters.java
+++ b/samples/snippets/src/main/java/com/example/bigtable/Filters.java
@@ -26,6 +26,7 @@
import com.google.cloud.bigtable.data.v2.models.Query;
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.models.RowCell;
+import com.google.cloud.bigtable.data.v2.models.TableId;
import java.io.IOException;
import java.time.Instant;
import java.time.temporal.ChronoUnit;
@@ -360,7 +361,7 @@ private static void readFilter(
// once, and can be reused for multiple requests. After completing all of your requests, call
// the "close" method on the client to safely clean up any remaining background resources.
try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) {
- Query query = Query.create(tableId).filter(filter);
+ Query query = Query.create(TableId.of(tableId)).filter(filter);
ServerStream<Row> rows = dataClient.readRows(query);
for (Row row : rows) {
printRow(row);
diff --git a/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java b/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java
index 724985ce22..99bc25735d 100644
--- a/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java
+++ b/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java
@@ -32,6 +32,7 @@
import com.google.cloud.bigtable.data.v2.models.Row;
import com.google.cloud.bigtable.data.v2.models.RowCell;
import com.google.cloud.bigtable.data.v2.models.RowMutation;
+import com.google.cloud.bigtable.data.v2.models.TableId;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
@@ -138,7 +139,7 @@ public void writeToTable() {
for (int i = 0; i < names.length; i++) {
String greeting = "Hello " + names[i] + "!";
RowMutation rowMutation =
- RowMutation.create(tableId, ROW_KEY_PREFIX + i)
+ RowMutation.create(TableId.of(tableId), ROW_KEY_PREFIX + i)
.setCell(COLUMN_FAMILY, COLUMN_QUALIFIER_NAME, names[i])
.setCell(COLUMN_FAMILY, COLUMN_QUALIFIER_GREETING, greeting);
dataClient.mutateRow(rowMutation);
@@ -155,7 +156,7 @@ public Row readSingleRow() {
// [START bigtable_hw_get_by_key]
try {
System.out.println("\nReading a single row by row key");
- Row row = dataClient.readRow(tableId, ROW_KEY_PREFIX + 0);
+ Row row = dataClient.readRow(TableId.of(tableId), ROW_KEY_PREFIX + 0);
System.out.println("Row: " + row.getKey().toStringUtf8());
for (RowCell cell : row.getCells()) {
System.out.printf(
@@ -175,7 +176,7 @@ public List<RowCell> readSpecificCells() {
// [START bigtable_hw_get_by_key]
try {
System.out.println("\nReading specific cells by family and qualifier");
- Row row = dataClient.readRow(tableId, ROW_KEY_PREFIX + 0);
+ Row row = dataClient.readRow(TableId.of(tableId), ROW_KEY_PREFIX + 0);
System.out.println("Row: " + row.getKey().toStringUtf8());
List<RowCell> cells = row.getCells(COLUMN_FAMILY, COLUMN_QUALIFIER_NAME);
for (RowCell cell : cells) {
@@ -196,7 +197,7 @@ public List<Row> readTable() {
// [START bigtable_hw_scan_all]
try {
System.out.println("\nReading the entire table");
- Query query = Query.create(tableId);
+ Query query = Query.create(TableId.of(tableId));
ServerStream<Row> rowStream = dataClient.readRows(query);
List<Row> tableRows = new ArrayList<>();
for (Row r : rowStream) {
@@ -229,7 +230,7 @@ public void filterLimitCellsPerCol(String tableId) {
private void readRowFilter(String tableId, Filter filter) {
String rowKey =
Base64.getEncoder().encodeToString("greeting0".getBytes(StandardCharsets.UTF_8));
- Row row = dataClient.readRow(tableId, rowKey, filter);
+ Row row = dataClient.readRow(TableId.of(tableId), rowKey, filter);
printRow(row);
System.out.println("Row filter completed.");
}
@@ -237,7 +238,7 @@ private void readRowFilter(String tableId, Filter filter) {
// [START bigtable_hw_scan_with_filter]
private void readFilter(String tableId, Filter filter) {
- Query query = Query.create(tableId).filter(filter);
+ Query query = Query.create(TableId.of(tableId)).filter(filter);
ServerStream