diff --git a/.readme-partials.yml b/.readme-partials.yml index dc9a9e2377..c9386d33fb 100644 --- a/.readme-partials.yml +++ b/.readme-partials.yml @@ -115,7 +115,7 @@ custom_content: | TIP: If you are experiencing version conflicts with gRPC, see [Version Conflicts](#version-conflicts). - ## Enabling client side metrics + ## Client side metrics Cloud Bigtable client supports publishing client side metrics to [Cloud Monitoring](https://cloud.google.com/monitoring/docs/monitoring-overview) under the @@ -124,6 +124,31 @@ custom_content: | This feature is available once you upgrade to version 2.16.0 and above. Follow the guide on https://cloud.google.com/bigtable/docs/client-side-metrics-setup to enable. + Since version 2.38.0, [client side metrics](https://cloud.google.com/bigtable/docs/client-side-metrics) + are enabled by default. This feature collects useful telemetry data in the client, and we recommend using it + in conjunction with server-side metrics to get a complete, actionable view of your Bigtable + performance. There is no additional cost to publish and view client-side metrics + in Cloud Monitoring. + + ### Opt out of client side metrics + + You can opt out of client side metrics with the following settings: + + ```java + BigtableDataSettings settings = BigtableDataSettings.newBuilder() + .setProjectId("my-project") + .setInstanceId("my-instance") + .setMetricsProvider(NoopMetricsProvider.INSTANCE) + .build(); + ``` + + ### Use a custom OpenTelemetry instance + + If your application already has OpenTelemetry integration, you can register client side metrics on + your OpenTelemetry instance. You can refer to + [CustomOpenTelemetryMetricsProvider](https://github.com/googleapis/java-bigtable/blob/main/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java) + for how to set it up. + ## Client request tracing: OpenCensus Tracing Cloud Bigtable client supports [OpenCensus Tracing](https://opencensus.io/tracing/), @@ -138,13 +163,13 @@ custom_content: | io.opencensus opencensus-impl - 0.24.0 + 0.31.1 runtime io.opencensus opencensus-exporter-trace-stackdriver - 0.24.0 + 0.31.1 io.grpc @@ -197,140 +222,46 @@ custom_content: | ); ``` - ## Enabling Cloud Bigtable Metrics: OpenCensus Stats - - --- - Note: We recommend [enabling client side built-in metrics](#enabling-client-side-metrics) - if you want to view your metrics on cloud monitoring. This integration is only for exporting the - metrics to a third party dashboard. - --- - - Cloud Bigtable client supports [Opencensus Metrics](https://opencensus.io/stats/), - which gives insight into the client internals and aids in debugging production issues. - All Cloud Bigtable Metrics are prefixed with `cloud.google.com/java/bigtable/`. The - metrics will be tagged with: - * `bigtable_project_id`: the project that contains the target Bigtable instance. - Please note that this id could be different from project that the client is running - in and different from the project where the metrics are exported to. - * `bigtable_instance_id`: the instance id of the target Bigtable instance - * `bigtable_app_profile_id`: the app profile id that is being used to access the target - Bigtable instance - - ### Available operation level metric views: - - * `cloud.google.com/java/bigtable/op_latency`: A distribution of latency of - each client method call, across all of it's RPC attempts. Tagged by - operation name and final response status.
- - * `cloud.google.com/java/bigtable/completed_ops`: The total count of - method invocations. Tagged by operation name and final response status. - - * `cloud.google.com/java/bigtable/read_rows_first_row_latency`: A - distribution of the latency of receiving the first row in a ReadRows - operation. - - * `cloud.google.com/java/bigtable/attempt_latency`: A distribution of latency of - each client RPC, tagged by operation name and the attempt status. Under normal - circumstances, this will be identical to op_latency. However, when the client - receives transient errors, op_latency will be the sum of all attempt_latencies - and the exponential delays. + ### Disable Bigtable traces - * `cloud.google.com/java/bigtable/attempts_per_op`: A distribution of attempts that - each operation required, tagged by operation name and final operation status. - Under normal circumstances, this will be 1. - - #### GFE metric views: - * `cloud.google.com/java/bigtable/gfe_latency`: A distribution of the latency - between Google's network receives an RPC and reads back the first byte of - the response. - - * `cloud.google.com/java/bigtable/gfe_header_missing_count`: A counter of the - number of RPC responses received without the server-timing header, which - indicates that the request probably never reached Google's network. - - By default, the functionality is disabled. For example to enable metrics using - [Google Stackdriver](https://cloud.google.com/monitoring/docs/): - - - [//]: # (TODO: figure out how to keep opencensus version in sync with pom.xml) - - If you are using Maven, add this to your pom.xml file - ```xml - - io.opencensus - opencensus-impl - 0.24.0 - runtime - - - io.opencensus - opencensus-exporter-stats-stackdriver - 0.24.0 - - - io.grpc - * - - - com.google.auth - * - - - - ``` - If you are using Gradle, add this to your dependencies - ```Groovy - compile 'io.opencensus:opencensus-impl:0.24.0' - compile 'io.opencensus:opencensus-exporter-stats-stackdriver:0.24.0' - ``` - If you are using SBT, add this to your dependencies - ```Scala - libraryDependencies += "io.opencensus" % "opencensus-impl" % "0.24.0" - libraryDependencies += "io.opencensus" % "opencensus-exporter-stats-stackdriver" % "0.24.0" - ``` - - At the start of your application configure the exporter and enable the Bigtable stats views: + If your application already has OpenCensus Tracing integration and you want to disable Bigtable + traces, you can do the following: ```java - import io.opencensus.exporter.stats.stackdriver.StackdriverStatsConfiguration; - import io.opencensus.exporter.stats.stackdriver.StackdriverStatsExporter; - - StackdriverStatsExporter.createAndRegister( - StackdriverStatsConfiguration.builder() - .setProjectId("YOUR_PROJECT_ID") - .build() - ); - - BigtableDataSettings.enableOpenCensusStats(); - // Enable GFE metric views - BigtableDataSettings.enableGfeOpenCensusStats(); + public static class MySampler extends Sampler { + + private final Sampler childSampler; + + MySampler(Sampler child) { + this.childSampler = child; + } + + @Override + public boolean shouldSample(@Nullable SpanContext parentContext, + @Nullable Boolean hasRemoteParent, + TraceId traceId, + SpanId spanId, + String name, + List parentLinks) { + if (name.contains("Bigtable")) { + return false; + } + return childSampler.shouldSample(parentContext, hasRemoteParent, traceId, spanId, name, parentLinks); + } + + @Override + public String getDescription() { + return "from my sampler"; + } + } ``` - You can view the metrics on the Google Cloud
Platform Console - [Metrics explorer](https://console.cloud.google.com/monitoring/metrics-explorer) - page. - - You can configure how frequently metrics are pushed to StackDriver and the - [Monitored resource type](https://cloud.google.com/monitoring/api/resources) by - updating `StackdriverStatsConfiguration`: - - ``` java - // Example: configuring export interval and monitored resource type - StackdriverStatsExporter.createAndRegister( - StackdriverStatsConfiguration.builder() - .setProjectId("YOUR_PROJECT_ID") - // Exporting metrics every 10 seconds - .setExportInterval(Duration.create(10, 0)) - // Configure monitored resource type. A common practice is to use the - // monitored resource objects that represent the physical resources - // where your application code is running. See the full list of - // monitored resource type here: - // https://cloud.google.com/monitoring/api/resources - .setMonitoredResource(MonitoredResource.newBuilder() - .setType("global") - .putLabels("project_id", "YOUR_PROJECT_ID") - .build()) - .build() + And use this sampler in your trace config: + ```java + Tracing.getTraceConfig().updateActiveTraceParams( + Tracing.getTraceConfig().getActiveTraceParams().toBuilder() + .setSampler(new MySampler(Samplers.probabilitySampler(0.1))) + .build() ); ``` diff --git a/CHANGELOG.md b/CHANGELOG.md index d8d587a3af..1847a8898a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,20 @@ # Changelog +## [2.38.0](https://github.com/googleapis/java-bigtable/compare/v2.37.0...v2.38.0) (2024-04-15) + + +### Features + +* Add Data Boost configurations to admin API ([f29c5bb](https://github.com/googleapis/java-bigtable/commit/f29c5bba08daffe2721454db1714f6ea6f47fc66)) +* Add feature flag for client side metrics ([#2179](https://github.com/googleapis/java-bigtable/issues/2179)) ([f29c5bb](https://github.com/googleapis/java-bigtable/commit/f29c5bba08daffe2721454db1714f6ea6f47fc66)) +* Migrate to OTEL and enable metrics by default ([#2166](https://github.com/googleapis/java-bigtable/issues/2166)) ([1682939](https://github.com/googleapis/java-bigtable/commit/168293937cc7f438a3ec2dee46805aa8e12089c4)) + + +### Bug Fixes + +* Add more error handling ([#2203](https://github.com/googleapis/java-bigtable/issues/2203)) ([c2a63f7](https://github.com/googleapis/java-bigtable/commit/c2a63f7627f2aa6e2e51ec3e185abfa5234ad3e4)) +* Fix export to log detect resource errors ([#2197](https://github.com/googleapis/java-bigtable/issues/2197)) ([d32fbb7](https://github.com/googleapis/java-bigtable/commit/d32fbb78bbde2ad04103ab7b2c1176a6df72d0a3)) + ## [2.37.0](https://github.com/googleapis/java-bigtable/compare/v2.36.0...v2.37.0) (2024-03-27) diff --git a/README.md b/README.md index a57ff48bf5..0bc7ecb0b4 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ If you are using Maven with [BOM][libraries-bom], add this to your pom.xml file: com.google.cloud libraries-bom - 26.25.0 + 26.37.0 pom import @@ -42,7 +42,7 @@ If you are using Maven without the BOM, add this to your dependencies: com.google.cloud google-cloud-bigtable - 2.36.0 + 2.38.0 ``` @@ -50,20 +50,20 @@ If you are using Maven without the BOM, add this to your dependencies: If you are using Gradle 5.x or later, add this to your dependencies: ```Groovy -implementation platform('com.google.cloud:libraries-bom:26.34.0') +implementation platform('com.google.cloud:libraries-bom:26.37.0') implementation 'com.google.cloud:google-cloud-bigtable' ``` If you are using Gradle without BOM, add this to your dependencies: ```Groovy -implementation 
'com.google.cloud:google-cloud-bigtable:2.36.0' +implementation 'com.google.cloud:google-cloud-bigtable:2.37.0' ``` If you are using SBT, add this to your dependencies: ```Scala -libraryDependencies += "com.google.cloud" % "google-cloud-bigtable" % "2.36.0" +libraryDependencies += "com.google.cloud" % "google-cloud-bigtable" % "2.37.0" ``` @@ -215,7 +215,7 @@ try { TIP: If you are experiencing version conflicts with gRPC, see [Version Conflicts](#version-conflicts). -## Enabling client side metrics +## Client side metrics Cloud Bigtable client supports publishing client side metrics to [Cloud Monitoring](https://cloud.google.com/monitoring/docs/monitoring-overview) under the @@ -224,6 +224,31 @@ Cloud Bigtable client supports publishing client side metrics to This feature is available once you upgrade to version 2.16.0 and above. Follow the guide on https://cloud.google.com/bigtable/docs/client-side-metrics-setup to enable. +Since version 2.38.0, [client side metrics](https://cloud.google.com/bigtable/docs/client-side-metrics) +are enabled by default. This feature collects useful telemetry data in the client, and we recommend using it +in conjunction with server-side metrics to get a complete, actionable view of your Bigtable +performance. There is no additional cost to publish and view client-side metrics +in Cloud Monitoring. + +### Opt out of client side metrics + +You can opt out of client side metrics with the following settings: + +```java +BigtableDataSettings settings = BigtableDataSettings.newBuilder() + .setProjectId("my-project") + .setInstanceId("my-instance") + .setMetricsProvider(NoopMetricsProvider.INSTANCE) + .build(); +``` + +### Use a custom OpenTelemetry instance + +If your application already has OpenTelemetry integration, you can register client side metrics on +your OpenTelemetry instance. You can refer to +[CustomOpenTelemetryMetricsProvider](https://github.com/googleapis/java-bigtable/blob/main/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java) +for how to set it up. + ## Client request tracing: OpenCensus Tracing Cloud Bigtable client supports [OpenCensus Tracing](https://opencensus.io/tracing/), @@ -238,13 +263,13 @@ If you are using Maven, add this to your pom.xml file io.opencensus opencensus-impl - 0.24.0 + 0.31.1 runtime io.opencensus opencensus-exporter-trace-stackdriver - 0.24.0 + 0.31.1 io.grpc @@ -297,140 +322,46 @@ Tracing.getTraceConfig().updateActiveTraceParams( ); ``` -## Enabling Cloud Bigtable Metrics: OpenCensus Stats - ---- -Note: We recommend [enabling client side built-in metrics](#enabling-client-side-metrics) -if you want to view your metrics on cloud monitoring. This integration is only for exporting the -metrics to a third party dashboard. ---- - -Cloud Bigtable client supports [Opencensus Metrics](https://opencensus.io/stats/), -which gives insight into the client internals and aids in debugging production issues. -All Cloud Bigtable Metrics are prefixed with `cloud.google.com/java/bigtable/`. The -metrics will be tagged with: - * `bigtable_project_id`: the project that contains the target Bigtable instance. - Please note that this id could be different from project that the client is running - in and different from the project where the metrics are exported to.
-* `bigtable_instance_id`: the instance id of the target Bigtable instance -* `bigtable_app_profile_id`: the app profile id that is being used to access the target - Bigtable instance - -### Available operation level metric views: - -* `cloud.google.com/java/bigtable/op_latency`: A distribution of latency of - each client method call, across all of it's RPC attempts. Tagged by - operation name and final response status. - -* `cloud.google.com/java/bigtable/completed_ops`: The total count of - method invocations. Tagged by operation name and final response status. - -* `cloud.google.com/java/bigtable/read_rows_first_row_latency`: A - distribution of the latency of receiving the first row in a ReadRows - operation. - -* `cloud.google.com/java/bigtable/attempt_latency`: A distribution of latency of - each client RPC, tagged by operation name and the attempt status. Under normal - circumstances, this will be identical to op_latency. However, when the client - receives transient errors, op_latency will be the sum of all attempt_latencies - and the exponential delays. +### Disable Bigtable traces -* `cloud.google.com/java/bigtable/attempts_per_op`: A distribution of attempts that - each operation required, tagged by operation name and final operation status. - Under normal circumstances, this will be 1. - -#### GFE metric views: -* `cloud.google.com/java/bigtable/gfe_latency`: A distribution of the latency -between Google's network receives an RPC and reads back the first byte of -the response. - -* `cloud.google.com/java/bigtable/gfe_header_missing_count`: A counter of the -number of RPC responses received without the server-timing header, which -indicates that the request probably never reached Google's network. - -By default, the functionality is disabled.
For example to enable metrics using -[Google Stackdriver](https://cloud.google.com/monitoring/docs/): - - -[//]: # (TODO: figure out how to keep opencensus version in sync with pom.xml) - -If you are using Maven, add this to your pom.xml file -```xml - - io.opencensus - opencensus-impl - 0.24.0 - runtime - - - io.opencensus - opencensus-exporter-stats-stackdriver - 0.24.0 - - - io.grpc - * - - - com.google.auth - * - - - -``` -If you are using Gradle, add this to your dependencies -```Groovy -compile 'io.opencensus:opencensus-impl:0.24.0' -compile 'io.opencensus:opencensus-exporter-stats-stackdriver:0.24.0' -``` -If you are using SBT, add this to your dependencies -```Scala -libraryDependencies += "io.opencensus" % "opencensus-impl" % "0.24.0" -libraryDependencies += "io.opencensus" % "opencensus-exporter-stats-stackdriver" % "0.24.0" -``` - -At the start of your application configure the exporter and enable the Bigtable stats views: +If your application already has OpenCensus Tracing integration and you want to disable Bigtable +traces, you can do the following: ```java -import io.opencensus.exporter.stats.stackdriver.StackdriverStatsConfiguration; -import io.opencensus.exporter.stats.stackdriver.StackdriverStatsExporter; - -StackdriverStatsExporter.createAndRegister( - StackdriverStatsConfiguration.builder() - .setProjectId("YOUR_PROJECT_ID") - .build() -); - -BigtableDataSettings.enableOpenCensusStats(); -// Enable GFE metric views -BigtableDataSettings.enableGfeOpenCensusStats(); +public static class MySampler extends Sampler { + + private final Sampler childSampler; + + MySampler(Sampler child) { + this.childSampler = child; + } + + @Override + public boolean shouldSample(@Nullable SpanContext parentContext, + @Nullable Boolean hasRemoteParent, + TraceId traceId, + SpanId spanId, + String name, + List parentLinks) { + if (name.contains("Bigtable")) { + return false; + } + return childSampler.shouldSample(parentContext, hasRemoteParent, traceId, spanId, name, parentLinks); + } + + @Override + public String getDescription() { + return "from my sampler"; + } +} ``` -You can view the metrics on the Google Cloud Platform Console -[Metrics explorer](https://console.cloud.google.com/monitoring/metrics-explorer) -page. - -You can configure how frequently metrics are pushed to StackDriver and the -[Monitored resource type](https://cloud.google.com/monitoring/api/resources) by -updating `StackdriverStatsConfiguration`: - -``` java -// Example: configuring export interval and monitored resource type -StackdriverStatsExporter.createAndRegister( - StackdriverStatsConfiguration.builder() - .setProjectId("YOUR_PROJECT_ID") - // Exporting metrics every 10 seconds - .setExportInterval(Duration.create(10, 0)) - // Configure monitored resource type. A common practice is to use the - // monitored resource objects that represent the physical resources - // where your application code is running. 
See the full list of - // monitored resource type here: - // https://cloud.google.com/monitoring/api/resources - .setMonitoredResource(MonitoredResource.newBuilder() - .setType("global") - .putLabels("project_id", "YOUR_PROJECT_ID") - .build()) - .build() +And use this sampler in your trace config: +```java +Tracing.getTraceConfig().updateActiveTraceParams( + Tracing.getTraceConfig().getActiveTraceParams().toBuilder() + .setSampler(new MySampler(Samplers.probabilitySampler(0.1))) + .build() ); ``` @@ -490,6 +421,7 @@ Samples are in the [`samples/`](https://github.com/googleapis/java-bigtable/tree | Sample | Source Code | Try it | | --------------------------- | --------------------------------- | ------ | | Native Image Bigtable Sample | [source code](https://github.com/googleapis/java-bigtable/blob/main/samples/native-image-sample/src/main/java/com/example/bigtable/NativeImageBigtableSample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigtable&page=editor&open_in_editor=samples/native-image-sample/src/main/java/com/example/bigtable/NativeImageBigtableSample.java) | +| Authorized View Example | [source code](https://github.com/googleapis/java-bigtable/blob/main/samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigtable&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java) | | Configure Connection Pool | [source code](https://github.com/googleapis/java-bigtable/blob/main/samples/snippets/src/main/java/com/example/bigtable/ConfigureConnectionPool.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigtable&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigtable/ConfigureConnectionPool.java) | | Filters | [source code](https://github.com/googleapis/java-bigtable/blob/main/samples/snippets/src/main/java/com/example/bigtable/Filters.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigtable&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigtable/Filters.java) | | Hello World | [source code](https://github.com/googleapis/java-bigtable/blob/main/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java) | [![Open in Cloud Shell][shell_img]](https://console.cloud.google.com/cloudshell/open?git_repo=https://github.com/googleapis/java-bigtable&page=editor&open_in_editor=samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java) | @@ -609,7 +541,7 @@ Java is a registered trademark of Oracle and/or its affiliates. 
[kokoro-badge-link-5]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-bigtable/java11.html [stability-image]: https://img.shields.io/badge/stability-stable-green [maven-version-image]: https://img.shields.io/maven-central/v/com.google.cloud/google-cloud-bigtable.svg -[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-bigtable/2.36.0 +[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-bigtable/2.37.0 [authentication]: https://github.com/googleapis/google-cloud-java#authentication [auth-scopes]: https://developers.google.com/identity/protocols/oauth2/scopes [predefined-iam-roles]: https://cloud.google.com/iam/docs/understanding-roles#predefined_roles diff --git a/google-cloud-bigtable-bom/pom.xml b/google-cloud-bigtable-bom/pom.xml index 9b0a9e5a4f..44ecd4f052 100644 --- a/google-cloud-bigtable-bom/pom.xml +++ b/google-cloud-bigtable-bom/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.google.cloud google-cloud-bigtable-bom - 2.37.0 + 2.38.0 pom com.google.cloud @@ -63,42 +63,42 @@ com.google.cloud google-cloud-bigtable - 2.37.0 + 2.38.0 com.google.cloud google-cloud-bigtable-emulator - 0.174.0 + 0.175.0 com.google.cloud google-cloud-bigtable-emulator-core - 0.174.0 + 0.175.0 com.google.api.grpc grpc-google-cloud-bigtable-admin-v2 - 2.37.0 + 2.38.0 com.google.api.grpc grpc-google-cloud-bigtable-v2 - 2.37.0 + 2.38.0 com.google.api.grpc proto-google-cloud-bigtable-admin-v2 - 2.37.0 + 2.38.0 com.google.api.grpc proto-google-cloud-bigtable-v2 - 2.37.0 + 2.38.0 com.google.cloud google-cloud-bigtable-stats - 2.37.0 + 2.38.0 diff --git a/google-cloud-bigtable-deps-bom/pom.xml b/google-cloud-bigtable-deps-bom/pom.xml index d6db550f52..02d0e4c4d2 100644 --- a/google-cloud-bigtable-deps-bom/pom.xml +++ b/google-cloud-bigtable-deps-bom/pom.xml @@ -13,7 +13,7 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.37.0 + 2.38.0 pom diff --git a/google-cloud-bigtable-emulator-core/pom.xml b/google-cloud-bigtable-emulator-core/pom.xml index f8d73ba00a..969f190862 100644 --- a/google-cloud-bigtable-emulator-core/pom.xml +++ b/google-cloud-bigtable-emulator-core/pom.xml @@ -7,11 +7,11 @@ google-cloud-bigtable-parent com.google.cloud - 2.37.0 + 2.38.0 google-cloud-bigtable-emulator-core - 0.174.0 + 0.175.0 A Java wrapper for the Cloud Bigtable emulator. 
diff --git a/google-cloud-bigtable-emulator/pom.xml b/google-cloud-bigtable-emulator/pom.xml index fd783e146b..acae8063a1 100644 --- a/google-cloud-bigtable-emulator/pom.xml +++ b/google-cloud-bigtable-emulator/pom.xml @@ -5,7 +5,7 @@ 4.0.0 google-cloud-bigtable-emulator - 0.174.0 + 0.175.0 Google Cloud Java - Bigtable Emulator https://github.com/googleapis/java-bigtable @@ -14,7 +14,7 @@ com.google.cloud google-cloud-bigtable-parent - 2.37.0 + 2.38.0 scm:git:git@github.com:googleapis/java-bigtable.git @@ -81,14 +81,14 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.37.0 + 2.38.0 pom import com.google.cloud google-cloud-bigtable-bom - 2.37.0 + 2.38.0 pom import @@ -99,7 +99,7 @@ com.google.cloud google-cloud-bigtable-emulator-core - 0.174.0 + 0.175.0 diff --git a/google-cloud-bigtable-stats/clirr-ignored-differences.xml b/google-cloud-bigtable-stats/clirr-ignored-differences.xml deleted file mode 100644 index aa9be424a8..0000000000 --- a/google-cloud-bigtable-stats/clirr-ignored-differences.xml +++ /dev/null @@ -1,42 +0,0 @@ - - - - - 7005 - com/google/cloud/bigtable/stats/StatsRecorderWrapper - *StatsRecorderWrapper* - *StatsRecorder* - - - - 7002 - com/google/cloud/bigtable/stats/StatsRecorderWrapper - void record(java.lang.String, java.lang.String, java.lang.String, java.lang.String) - - - - 7002 - com/google/cloud/bigtable/stats/StatsRecorderWrapper - void putBatchRequestThrottled(long) - - - - 7005 - com/google/cloud/bigtable/stats/StatsRecorderWrapperForConnection - *StatsRecorderWrapperForConnection* - * - - - - 7002 - com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils$ResourceUtilsWrapper - * - - - - 7006 - com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils$ResourceUtilsWrapper - * - * - - diff --git a/google-cloud-bigtable-stats/pom.xml b/google-cloud-bigtable-stats/pom.xml deleted file mode 100644 index 638d0ce2a7..0000000000 --- a/google-cloud-bigtable-stats/pom.xml +++ /dev/null @@ -1,269 +0,0 @@ - - - - com.google.cloud - google-cloud-bigtable-parent - 2.37.0 - - 4.0.0 - - - google-cloud-bigtable-stats - 2.37.0 - Experimental project to shade OpenCensus dependencies. 
- - - - - com.google.cloud - google-cloud-bigtable-deps-bom - 2.37.0 - pom - import - - - - - - - - io.opencensus - opencensus-api - - - io.opencensus - opencensus-exporter-stats-stackdriver - - - io.opencensus - opencensus-contrib-resource-util - - - io.opencensus - opencensus-impl - runtime - - - - - com.google.cloud - google-cloud-monitoring - - - - com.google.http-client - google-http-client-gson - - - com.google.http-client - google-http-client - - - - io.perfmark - perfmark-api - - - - - com.google.api.grpc - proto-google-cloud-monitoring-v3 - - - com.google.api.grpc - proto-google-common-protos - - - com.google.auth - google-auth-library-credentials - - - com.google.api - gax - - - - com.google.http-client - google-http-client - - - com.google.http-client - google-http-client-gson - - - - - com.google.api - api-common - - - com.google.api - gax-grpc - - - com.google.protobuf - protobuf-java - - - com.google.guava - guava - - - org.threeten - threetenbp - - - com.google.code.findbugs - jsr305 - - - - - com.google.http-client - google-http-client - runtime - - - - - com.google.truth - truth - test - - - junit - junit - test - - - org.mockito - mockito-core - test - - - - - - - org.apache.maven.plugins - maven-shade-plugin - 3.2.4 - - - package - - shade - - - false - true - - - - io.opencensus:* - - - - - io.opencensus - - com.google.bigtable.veneer.repackaged.io.opencensus - - - - - - - - - - - - - - org.apache.maven.plugins - maven-dependency-plugin - 3.6.1 - - - - - - - - io.opencensus:opencensus-exporter-metrics-util:* - io.opencensus:opencensus-exporter-stats-stackdriver:* - - - - - org.codehaus.mojo - clirr-maven-plugin - - - com/google/bigtable/veneer/repackaged/** - - - - - org.apache.maven.plugins - maven-enforcer-plugin - - - enforce-version-consistency - - enforce - - - - - - - - - io.opencensus:*:[0.31.1] - io.opencensus:opencensus-proto:[0.2.0] - - - - - - - - - org.codehaus.mojo - license-maven-plugin - 2.4.0 - - - default-cli - generate-resources - - add-third-party - - - test - - io.opencensus:* - true - - - - - - - - - test - - - - - - diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporter.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporter.java deleted file mode 100644 index d8936b0e0e..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporter.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigtable.stats; - -import com.google.api.MonitoredResource; -import com.google.cloud.monitoring.v3.MetricServiceClient; -import com.google.monitoring.v3.CreateTimeSeriesRequest; -import com.google.monitoring.v3.ProjectName; -import io.opencensus.exporter.metrics.util.MetricExporter; -import io.opencensus.metrics.export.Metric; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.logging.Level; -import java.util.logging.Logger; -import java.util.stream.Collectors; - -final class BigtableCreateTimeSeriesExporter extends MetricExporter { - private static final Logger logger = - Logger.getLogger(BigtableCreateTimeSeriesExporter.class.getName()); - private final MetricServiceClient metricServiceClient; - private final MonitoredResource gceOrGkeMonitoredResource; - private final String clientId; - - BigtableCreateTimeSeriesExporter( - MetricServiceClient metricServiceClient, MonitoredResource gceOrGkeMonitoredResource) { - this.metricServiceClient = metricServiceClient; - this.clientId = BigtableStackdriverExportUtils.getDefaultTaskValue(); - this.gceOrGkeMonitoredResource = gceOrGkeMonitoredResource; - } - - public void export(Collection metrics) { - Map> projectToTimeSeries = new HashMap<>(); - - for (Metric metric : metrics) { - // only export bigtable metrics - if (!BigtableStackdriverExportUtils.shouldExportMetric(metric.getMetricDescriptor())) { - continue; - } - - projectToTimeSeries = - metric.getTimeSeriesList().stream() - .collect( - Collectors.groupingBy( - timeSeries -> - BigtableStackdriverExportUtils.getProjectId( - metric.getMetricDescriptor(), timeSeries, gceOrGkeMonitoredResource), - Collectors.mapping( - timeSeries -> - BigtableStackdriverExportUtils.convertTimeSeries( - metric.getMetricDescriptor(), - timeSeries, - clientId, - gceOrGkeMonitoredResource), - Collectors.toList()))); - - for (Map.Entry> entry : - projectToTimeSeries.entrySet()) { - ProjectName projectName = ProjectName.of(entry.getKey()); - CreateTimeSeriesRequest request = - CreateTimeSeriesRequest.newBuilder() - .setName(projectName.toString()) - .addAllTimeSeries(entry.getValue()) - .build(); - try { - this.metricServiceClient.createServiceTimeSeries(request); - } catch (Throwable e) { - logger.log( - Level.WARNING, - "Exception thrown when exporting TimeSeries for projectName=" - + projectName.getProject(), - e); - } - } - } - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverExportUtils.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverExportUtils.java deleted file mode 100644 index cc70fbb435..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverExportUtils.java +++ /dev/null @@ -1,367 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigtable.stats; - -import static com.google.cloud.bigtable.stats.BuiltinViewConstants.PER_CONNECTION_ERROR_COUNT_VIEW; - -import com.google.api.Distribution.BucketOptions; -import com.google.api.Distribution.BucketOptions.Explicit; -import com.google.api.Metric; -import com.google.api.MetricDescriptor.MetricKind; -import com.google.api.MonitoredResource; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Maps; -import com.google.monitoring.v3.TimeInterval; -import com.google.monitoring.v3.TypedValue; -import io.opencensus.common.Function; -import io.opencensus.common.Functions; -import io.opencensus.common.Timestamp; -import io.opencensus.metrics.LabelKey; -import io.opencensus.metrics.LabelValue; -import io.opencensus.metrics.export.Distribution; -import io.opencensus.metrics.export.Distribution.Bucket; -import io.opencensus.metrics.export.Distribution.BucketOptions.ExplicitOptions; -import io.opencensus.metrics.export.MetricDescriptor; -import io.opencensus.metrics.export.MetricDescriptor.Type; -import io.opencensus.metrics.export.Point; -import io.opencensus.metrics.export.Summary; -import io.opencensus.metrics.export.TimeSeries; -import io.opencensus.metrics.export.Value; -import java.lang.management.ManagementFactory; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.security.SecureRandom; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.UUID; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.annotation.Nullable; - -class BigtableStackdriverExportUtils { - private static final String BIGTABLE_RESOURCE_TYPE = "bigtable_client_raw"; - - @VisibleForTesting static final String GCE_RESOURCE_TYPE = "gce_instance"; - @VisibleForTesting static final String GKE_RESOURCE_TYPE = "k8s_container"; - @VisibleForTesting static final String GCE_OR_GKE_PROJECT_ID_KEY = "project_id"; - private static final Logger logger = - Logger.getLogger(BigtableStackdriverExportUtils.class.getName()); - - private static final Function typedValueDoubleFunction = - arg -> { - TypedValue.Builder builder = TypedValue.newBuilder(); - builder.setDoubleValue(arg); - return builder.build(); - }; - private static final Function typedValueLongFunction = - arg -> { - TypedValue.Builder builder = TypedValue.newBuilder(); - builder.setInt64Value(arg); - return builder.build(); - }; - private static final Function typedValueDistributionFunction = - arg -> { - TypedValue.Builder builder = TypedValue.newBuilder(); - return builder - .setDistributionValue(BigtableStackdriverExportUtils.createDistribution(arg)) - .build(); - }; - private static final Function typedValueSummaryFunction = - arg -> { - TypedValue.Builder builder = TypedValue.newBuilder(); - return builder.build(); - }; - private static final Function bucketOptionsExplicitFunction = - arg -> { - BucketOptions.Builder builder = BucketOptions.newBuilder(); - Explicit.Builder explicitBuilder = Explicit.newBuilder(); - explicitBuilder.addBounds(0.0D); - explicitBuilder.addAllBounds(arg.getBucketBoundaries()); - builder.setExplicitBuckets(explicitBuilder.build()); - return builder.build(); - }; - - // promote the following metric labels to Bigtable monitored resource labels - private static final Set PROMOTED_BIGTABLE_RESOURCE_LABELS = - ImmutableSet.of( - BuiltinMeasureConstants.PROJECT_ID.getName(), - 
BuiltinMeasureConstants.INSTANCE_ID.getName(), - BuiltinMeasureConstants.CLUSTER.getName(), - BuiltinMeasureConstants.ZONE.getName(), - BuiltinMeasureConstants.TABLE.getName()); - - private static final LabelKey CLIENT_UID_LABEL_KEY = - LabelKey.create(BuiltinMeasureConstants.CLIENT_UID.getName(), "client uid"); - - static boolean isBigtableTableMetric(MetricDescriptor metricDescriptor) { - return metricDescriptor.getName().contains("bigtable") - && !metricDescriptor.getName().equals(PER_CONNECTION_ERROR_COUNT_VIEW.getName().asString()); - } - - static boolean shouldExportMetric(MetricDescriptor metricDescriptor) { - return isBigtableTableMetric(metricDescriptor) - || (metricDescriptor.getName().equals(PER_CONNECTION_ERROR_COUNT_VIEW.getName().asString()) - && (ConsumerEnvironmentUtils.isEnvGce() || ConsumerEnvironmentUtils.isEnvGke())); - } - - static com.google.monitoring.v3.TimeSeries convertTimeSeries( - MetricDescriptor metricDescriptor, - TimeSeries timeSeries, - String clientId, - MonitoredResource gceOrGkeMonitoredResource) { - Type metricType = metricDescriptor.getType(); - - com.google.monitoring.v3.TimeSeries.Builder builder; - if (isBigtableTableMetric(metricDescriptor)) { - builder = - setupBuilderForBigtableResource( - metricDescriptor, - MonitoredResource.newBuilder().setType(BIGTABLE_RESOURCE_TYPE), - timeSeries, - clientId); - } else if (ConsumerEnvironmentUtils.isEnvGce() || ConsumerEnvironmentUtils.isEnvGke()) { - builder = - setupBuilderForGceOrGKEResource( - metricDescriptor, gceOrGkeMonitoredResource, timeSeries, clientId); - } else { - logger.warning( - "Trying to export metric " - + metricDescriptor.getName() - + " in a non-GCE/GKE environment."); - return com.google.monitoring.v3.TimeSeries.newBuilder().build(); - } - builder.setMetricKind(createMetricKind(metricType)); - builder.setValueType(createValueType(metricType)); - Timestamp startTimeStamp = timeSeries.getStartTimestamp(); - for (Point point : timeSeries.getPoints()) { - builder.addPoints(createPoint(point, startTimeStamp)); - } - return builder.build(); - } - - private static com.google.monitoring.v3.TimeSeries.Builder setupBuilderForBigtableResource( - MetricDescriptor metricDescriptor, - MonitoredResource.Builder monitoredResourceBuilder, - TimeSeries timeSeries, - String clientId) { - List labelKeys = metricDescriptor.getLabelKeys(); - String metricName = metricDescriptor.getName(); - List metricTagKeys = new ArrayList<>(); - List metricTagValues = new ArrayList<>(); - - List labelValues = timeSeries.getLabelValues(); - for (int i = 0; i < labelValues.size(); i++) { - // If the label is defined in the monitored resource, convert it to - // a monitored resource label. Otherwise, keep it as a metric label. 
- if (PROMOTED_BIGTABLE_RESOURCE_LABELS.contains(labelKeys.get(i).getKey())) { - monitoredResourceBuilder.putLabels( - labelKeys.get(i).getKey(), labelValues.get(i).getValue()); - } else { - metricTagKeys.add(labelKeys.get(i)); - metricTagValues.add(labelValues.get(i)); - } - } - metricTagKeys.add(CLIENT_UID_LABEL_KEY); - metricTagValues.add(LabelValue.create(clientId)); - - com.google.monitoring.v3.TimeSeries.Builder builder = - com.google.monitoring.v3.TimeSeries.newBuilder(); - builder.setResource(monitoredResourceBuilder.build()); - builder.setMetric(createMetric(metricName, metricTagKeys, metricTagValues)); - - return builder; - } - - private static com.google.monitoring.v3.TimeSeries.Builder setupBuilderForGceOrGKEResource( - MetricDescriptor metricDescriptor, - MonitoredResource gceOrGkeMonitoredResource, - TimeSeries timeSeries, - String clientId) { - List labelKeys = metricDescriptor.getLabelKeys(); - String metricName = metricDescriptor.getName(); - List metricTagKeys = new ArrayList<>(); - List metricTagValues = new ArrayList<>(); - - List labelValues = timeSeries.getLabelValues(); - for (int i = 0; i < labelValues.size(); i++) { - metricTagKeys.add(labelKeys.get(i)); - metricTagValues.add(labelValues.get(i)); - } - metricTagKeys.add(CLIENT_UID_LABEL_KEY); - metricTagValues.add(LabelValue.create(clientId)); - - com.google.monitoring.v3.TimeSeries.Builder builder = - com.google.monitoring.v3.TimeSeries.newBuilder(); - builder.setResource(gceOrGkeMonitoredResource); - builder.setMetric(createMetric(metricName, metricTagKeys, metricTagValues)); - - return builder; - } - - static String getProjectId( - MetricDescriptor metricDescriptor, - TimeSeries timeSeries, - MonitoredResource gceOrGkeMonitoredResource) { - if (isBigtableTableMetric(metricDescriptor)) { - return getProjectIdForBigtableTableResource(metricDescriptor, timeSeries); - } else { - return getProjectIdForGceOrGkeResource(gceOrGkeMonitoredResource); - } - } - - static String getProjectIdForBigtableTableResource( - MetricDescriptor metricDescriptor, TimeSeries timeSeries) { - List labelKeys = metricDescriptor.getLabelKeys(); - List labelValues = timeSeries.getLabelValues(); - for (int i = 0; i < labelKeys.size(); i++) { - if (labelKeys.get(i).getKey().equals(BuiltinMeasureConstants.PROJECT_ID.getName())) { - return labelValues.get(i).getValue(); - } - } - throw new IllegalStateException("Can't find project id for the current timeseries"); - } - - static String getProjectIdForGceOrGkeResource(MonitoredResource gceOrGkeMonitoredResource) { - if (!gceOrGkeMonitoredResource.getType().equals(GCE_RESOURCE_TYPE) - && !gceOrGkeMonitoredResource.getType().equals(GKE_RESOURCE_TYPE)) { - throw new IllegalStateException( - "Expected GCE or GKE resource type, but found " + gceOrGkeMonitoredResource); - } - return gceOrGkeMonitoredResource.getLabelsOrThrow(GCE_OR_GKE_PROJECT_ID_KEY); - } - - static String getDefaultTaskValue() { - // Something like '@' - final String jvmName = ManagementFactory.getRuntimeMXBean().getName(); - // If not the expected format then generate a random number. - if (jvmName.indexOf('@') < 1) { - String hostname = "localhost"; - try { - hostname = InetAddress.getLocalHost().getHostName(); - } catch (UnknownHostException e) { - logger.log(Level.INFO, "Unable to get the hostname.", e); - } - // Generate a random number and use the same format "random_number@hostname". 
- return "java-" + new SecureRandom().nextInt() + "@" + hostname; - } - return "java-" + UUID.randomUUID() + jvmName; - } - - private static MetricKind createMetricKind(Type type) { - switch (type) { - case CUMULATIVE_DOUBLE: - case CUMULATIVE_INT64: - case CUMULATIVE_DISTRIBUTION: - return MetricKind.CUMULATIVE; - default: - return MetricKind.UNRECOGNIZED; - } - } - - private static com.google.api.MetricDescriptor.ValueType createValueType(Type type) { - switch (type) { - case CUMULATIVE_DOUBLE: - return com.google.api.MetricDescriptor.ValueType.DOUBLE; - case CUMULATIVE_INT64: - return com.google.api.MetricDescriptor.ValueType.INT64; - case CUMULATIVE_DISTRIBUTION: - return com.google.api.MetricDescriptor.ValueType.DISTRIBUTION; - default: - return com.google.api.MetricDescriptor.ValueType.UNRECOGNIZED; - } - } - - private static Metric createMetric( - String metricName, List labelKeys, List labelValues) { - Metric.Builder builder = Metric.newBuilder(); - builder.setType(metricName); - Map stringTagMap = Maps.newHashMap(); - - for (int i = 0; i < labelValues.size(); ++i) { - String value = labelValues.get(i).getValue(); - if (value != null) { - stringTagMap.put(labelKeys.get(i).getKey(), value); - } - } - - builder.putAllLabels(stringTagMap); - return builder.build(); - } - - private static com.google.monitoring.v3.Point createPoint(Point point, Timestamp startTimestamp) { - com.google.monitoring.v3.TimeInterval.Builder timeIntervalBuilder = TimeInterval.newBuilder(); - timeIntervalBuilder.setStartTime(convertTimestamp(startTimestamp)); - timeIntervalBuilder.setEndTime(convertTimestamp(point.getTimestamp())); - - com.google.monitoring.v3.Point.Builder builder = com.google.monitoring.v3.Point.newBuilder(); - builder.setInterval(timeIntervalBuilder.build()); - builder.setValue(createTypedValue(point.getValue())); - return builder.build(); - } - - private static TypedValue createTypedValue(Value value) { - return value.match( - typedValueDoubleFunction, - typedValueLongFunction, - typedValueDistributionFunction, - typedValueSummaryFunction, - Functions.throwIllegalArgumentException()); - } - - private static com.google.api.Distribution createDistribution(Distribution distribution) { - com.google.api.Distribution.Builder builder = - com.google.api.Distribution.newBuilder() - .setBucketOptions(createBucketOptions(distribution.getBucketOptions())) - .setCount(distribution.getCount()) - .setMean( - distribution.getCount() == 0L - ? 0.0D - : distribution.getSum() / (double) distribution.getCount()) - .setSumOfSquaredDeviation(distribution.getSumOfSquaredDeviations()); - setBucketCounts(distribution.getBuckets(), builder); - return builder.build(); - } - - private static BucketOptions createBucketOptions( - @Nullable Distribution.BucketOptions bucketOptions) { - com.google.api.Distribution.BucketOptions.Builder builder = BucketOptions.newBuilder(); - return bucketOptions == null - ? builder.build() - : bucketOptions.match( - bucketOptionsExplicitFunction, Functions.throwIllegalArgumentException()); - } - - private static void setBucketCounts( - List buckets, com.google.api.Distribution.Builder builder) { - builder.addBucketCounts(0L); - - for (Bucket bucket : buckets) { - builder.addBucketCounts(bucket.getCount()); - } - } - - private static com.google.protobuf.Timestamp convertTimestamp(Timestamp censusTimestamp) { - return censusTimestamp.getSeconds() < 0L - ? 
com.google.protobuf.Timestamp.newBuilder().build() - : com.google.protobuf.Timestamp.newBuilder() - .setSeconds(censusTimestamp.getSeconds()) - .setNanos(censusTimestamp.getNanos()) - .build(); - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverStatsExporter.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverStatsExporter.java deleted file mode 100644 index 856353cfd0..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BigtableStackdriverStatsExporter.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import com.google.api.MonitoredResource; -import com.google.api.core.InternalApi; -import com.google.api.gax.core.FixedCredentialsProvider; -import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; -import com.google.auth.Credentials; -import com.google.cloud.monitoring.v3.MetricServiceClient; -import com.google.cloud.monitoring.v3.MetricServiceSettings; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.MoreObjects; -import com.google.common.base.Preconditions; -import io.opencensus.common.Duration; -import io.opencensus.exporter.metrics.util.IntervalMetricReader; -import io.opencensus.exporter.metrics.util.MetricReader; -import io.opencensus.exporter.stats.stackdriver.StackdriverStatsConfiguration; -import io.opencensus.metrics.Metrics; -import java.io.IOException; -import javax.annotation.Nullable; -import javax.annotation.concurrent.GuardedBy; - -@InternalApi -public class BigtableStackdriverStatsExporter { - static final Object lock = new Object(); - - @Nullable - @GuardedBy("lock") - private static BigtableStackdriverStatsExporter instance = null; - - // Default export interval is 1 minute - private static final Duration EXPORT_INTERVAL = Duration.create(60, 0); - - private static final String MONITORING_ENDPOINT = - MoreObjects.firstNonNull( - System.getProperty("bigtable.test-monitoring-endpoint"), - MetricServiceSettings.getDefaultEndpoint()); - - private final IntervalMetricReader intervalMetricReader; - - private BigtableStackdriverStatsExporter( - MetricServiceClient metricServiceClient, - Duration exportInterval, - MonitoredResource gceOrGkeMonitoredResource) { - IntervalMetricReader.Options.Builder intervalMetricReaderOptionsBuilder = - IntervalMetricReader.Options.builder(); - intervalMetricReaderOptionsBuilder.setExportInterval(exportInterval); - this.intervalMetricReader = - IntervalMetricReader.create( - new BigtableCreateTimeSeriesExporter(metricServiceClient, gceOrGkeMonitoredResource), - MetricReader.create( - MetricReader.Options.builder() - .setMetricProducerManager( - Metrics.getExportComponent().getMetricProducerManager()) - .build()), - intervalMetricReaderOptionsBuilder.build()); - } - - public static void register(Credentials credentials) throws 
IOException { - synchronized (lock) { - Preconditions.checkState( - instance == null, "Bigtable Stackdriver stats exporter is already created"); - // Default timeout for creating a client is 1 minute - MetricServiceClient client = createMetricServiceClient(credentials, Duration.create(60L, 0)); - MonitoredResource gceOrGkeMonitoredResource = null; - if (ConsumerEnvironmentUtils.isEnvGce() || ConsumerEnvironmentUtils.isEnvGke()) { - gceOrGkeMonitoredResource = - StackdriverStatsConfiguration.builder().build().getMonitoredResource(); - } - instance = - new BigtableStackdriverStatsExporter(client, EXPORT_INTERVAL, gceOrGkeMonitoredResource); - } - } - - @GuardedBy("lock") - @VisibleForTesting - static MetricServiceClient createMetricServiceClient(Credentials credentials, Duration deadline) - throws IOException { - MetricServiceSettings.Builder settingsBuilder = - MetricServiceSettings.newBuilder() - .setTransportChannelProvider(InstantiatingGrpcChannelProvider.newBuilder().build()); - settingsBuilder.setCredentialsProvider(FixedCredentialsProvider.create(credentials)); - settingsBuilder.setEndpoint(MONITORING_ENDPOINT); - org.threeten.bp.Duration timeout = org.threeten.bp.Duration.ofMillis(deadline.toMillis()); - settingsBuilder.createServiceTimeSeriesSettings().setSimpleTimeoutNoRetries(timeout); - return MetricServiceClient.create(settingsBuilder.build()); - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinMeasureConstants.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinMeasureConstants.java deleted file mode 100644 index 59e7511d41..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinMeasureConstants.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigtable.stats; - -import static io.opencensus.stats.Measure.MeasureLong; - -import io.opencensus.tags.TagKey; - -/** Built-in metrics that will be readable under bigtable.googleapis.com/client namespace */ -class BuiltinMeasureConstants { - // Monitored resource TagKeys - static final TagKey PROJECT_ID = TagKey.create("project_id"); - static final TagKey INSTANCE_ID = TagKey.create("instance"); - static final TagKey CLUSTER = TagKey.create("cluster"); - static final TagKey TABLE = TagKey.create("table"); - static final TagKey ZONE = TagKey.create("zone"); - static final TagKey CLIENT_UID = TagKey.create("client_uid"); - - // Metrics TagKeys - static final TagKey APP_PROFILE = TagKey.create("app_profile"); - static final TagKey METHOD = TagKey.create("method"); - static final TagKey STREAMING = TagKey.create("streaming"); - static final TagKey STATUS = TagKey.create("status"); - static final TagKey CLIENT_NAME = TagKey.create("client_name"); - - // Units - private static final String COUNT = "1"; - private static final String MILLISECOND = "ms"; - - // Measurements - static final MeasureLong OPERATION_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/operation_latencies", - "Total time until final operation success or failure, including retries and backoff.", - MILLISECOND); - - static final MeasureLong ATTEMPT_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/attempt_latencies", - "Client observed latency per RPC attempt.", - MILLISECOND); - - static final MeasureLong RETRY_COUNT = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/retry_count", - "The number of additional RPCs sent after the initial attempt.", - COUNT); - - static final MeasureLong FIRST_RESPONSE_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/first_response_latencies", - "Latency from operation start until the response headers were received. The publishing of the measurement will be delayed until the attempt response has been received.", - MILLISECOND); - - static final MeasureLong SERVER_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/server_latencies", - "The latency measured from the moment that the RPC entered the Google data center until the RPC was completed.", - MILLISECOND); - - static final MeasureLong CONNECTIVITY_ERROR_COUNT = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/connectivity_error_count", - "Number of requests that failed to reach the Google datacenter. (Requests without google response headers).", - COUNT); - - static final MeasureLong APPLICATION_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/application_latencies", - "The latency of the client application consuming available response data.", - MILLISECOND); - - static final MeasureLong THROTTLING_LATENCIES = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/throttling_latencies", - "The artificial latency introduced by the client to limit the number of outstanding requests. 
The publishing of the measurement will be delayed until the attempt trailers have been received.", - MILLISECOND); - - static final MeasureLong PER_CONNECTION_ERROR_COUNT = - MeasureLong.create( - "bigtable.googleapis.com/internal/client/per_connection_error_count", - "Distribution of counts of channels per 'error count per minute'.", - COUNT); -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViewConstants.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViewConstants.java deleted file mode 100644 index 82ce61e2d3..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViewConstants.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.APPLICATION_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.APP_PROFILE; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.ATTEMPT_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.CLIENT_NAME; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.CLUSTER; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.CONNECTIVITY_ERROR_COUNT; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.FIRST_RESPONSE_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.INSTANCE_ID; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.METHOD; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.OPERATION_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.PER_CONNECTION_ERROR_COUNT; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.PROJECT_ID; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.RETRY_COUNT; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.SERVER_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.STATUS; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.STREAMING; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.TABLE; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.THROTTLING_LATENCIES; -import static com.google.cloud.bigtable.stats.BuiltinMeasureConstants.ZONE; -import static io.opencensus.stats.Aggregation.Distribution; -import static io.opencensus.stats.Aggregation.Sum; - -import com.google.common.collect.ImmutableList; -import io.opencensus.stats.Aggregation; -import io.opencensus.stats.BucketBoundaries; -import io.opencensus.stats.View; - -/** Create built-in metrics views under bigtable.googleapis.com/internal/client namespace */ -class BuiltinViewConstants { - private static final Aggregation AGGREGATION_WITH_MILLIS_HISTOGRAM = - 
Distribution.create( - BucketBoundaries.create( - ImmutableList.of( - 0.0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, - 13.0, 16.0, 20.0, 25.0, 30.0, 40.0, 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, - 250.0, 300.0, 400.0, 500.0, 650.0, 800.0, 1000.0, 2000.0, 5000.0, 10000.0, - 20000.0, 50000.0, 100000.0))); - - private static final Aggregation AGGREGATION_RETRY_COUNT = - Distribution.create( - BucketBoundaries.create( - ImmutableList.of( - 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 15.0, 20.0, 30.0, 40.0, 50.0, - 100.0))); - - private static final Aggregation PER_CONNECTION_ERROR_COUNT_AGGREGATION = - Distribution.create( - BucketBoundaries.create( - ImmutableList.of( - 1.0, - 2.0, - 4.0, - 8.0, - 16.0, - 32.0, - 64.0, - 125.0, - 250.0, - 500.0, - 1_000.0, - 2_000.0, - 4_000.0, - 8_000.0, - 16_000.0, - 32_000.0, - 64_000.0, - 128_000.0, - 250_000.0, - 500_000.0, - 1_000_000.0))); - - private static final Aggregation AGGREGATION_COUNT = Sum.create(); - - static final View OPERATION_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/operation_latencies"), - "Total time until final operation success or failure, including retries and backoff.", - OPERATION_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STREAMING, - STATUS, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View ATTEMPT_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/attempt_latencies"), - "Client observed latency per RPC attempt.", - ATTEMPT_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STREAMING, - STATUS, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View RETRY_COUNT_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/retry_count"), - "The number of additional RPCs sent after the initial attempt.", - RETRY_COUNT, - AGGREGATION_COUNT, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STATUS, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View FIRST_RESPONSE_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/first_response_latencies"), - "Latency from operation start until the response headers were received. The publishing of the measurement will be delayed until the attempt response has been received.", - FIRST_RESPONSE_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STATUS, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View SERVER_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/server_latencies"), - "The latency measured from the moment that the RPC entered the Google data center until the RPC was completed.", - SERVER_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STATUS, - STREAMING, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View CONNECTIVITY_ERROR_COUNT_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/connectivity_error_count"), - "Number of requests that failed to reach the Google datacenter. 
(Requests without google response headers).", - CONNECTIVITY_ERROR_COUNT, - AGGREGATION_COUNT, - ImmutableList.of( - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE, - METHOD, - STATUS, - CLIENT_NAME, - CLUSTER, - ZONE, - TABLE)); - - static final View APPLICATION_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/application_latencies"), - "The latency of the client application consuming available response data.", - APPLICATION_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, INSTANCE_ID, APP_PROFILE, METHOD, CLIENT_NAME, CLUSTER, ZONE, TABLE)); - - static final View THROTTLING_LATENCIES_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/throttling_latencies"), - "The artificial latency introduced by the client to limit the number of outstanding requests. The publishing of the measurement will be delayed until the attempt trailers have been received.", - THROTTLING_LATENCIES, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - ImmutableList.of( - PROJECT_ID, INSTANCE_ID, APP_PROFILE, METHOD, CLIENT_NAME, CLUSTER, ZONE, TABLE)); - - static final View PER_CONNECTION_ERROR_COUNT_VIEW = - View.create( - View.Name.create("bigtable.googleapis.com/internal/client/per_connection_error_count"), - "Distribution of counts of channels per 'error count per minute'.", - PER_CONNECTION_ERROR_COUNT, - PER_CONNECTION_ERROR_COUNT_AGGREGATION, - ImmutableList.of(PROJECT_ID, INSTANCE_ID, APP_PROFILE, CLIENT_NAME)); -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViews.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViews.java deleted file mode 100644 index 2b91ee60c3..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/BuiltinViews.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import com.google.api.core.InternalApi; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableSet; -import io.opencensus.stats.Stats; -import io.opencensus.stats.View; -import io.opencensus.stats.ViewManager; - -/** For registering built-in metric views */ -@InternalApi("For internal use only") -public class BuiltinViews { - - @VisibleForTesting - static final ImmutableSet BIGTABLE_BUILTIN_VIEWS = - ImmutableSet.of( - BuiltinViewConstants.OPERATION_LATENCIES_VIEW, - BuiltinViewConstants.ATTEMPT_LATENCIES_VIEW, - BuiltinViewConstants.RETRY_COUNT_VIEW, - BuiltinViewConstants.FIRST_RESPONSE_LATENCIES_VIEW, - BuiltinViewConstants.SERVER_LATENCIES_VIEW, - BuiltinViewConstants.CONNECTIVITY_ERROR_COUNT_VIEW, - BuiltinViewConstants.APPLICATION_LATENCIES_VIEW, - BuiltinViewConstants.THROTTLING_LATENCIES_VIEW); - // We store views that don't use the Bigtable schema and need different tags in a separate set to - // simplify testing. 
- static final ImmutableSet NON_BIGTABLE_BUILTIN_VIEWS = - ImmutableSet.of(BuiltinViewConstants.PER_CONNECTION_ERROR_COUNT_VIEW); - - @VisibleForTesting - void registerPrivateViews(ViewManager viewManager) { - for (View view : BIGTABLE_BUILTIN_VIEWS) { - viewManager.registerView(view); - } - for (View view : NON_BIGTABLE_BUILTIN_VIEWS) { - viewManager.registerView(view); - } - } - - public static void registerBigtableBuiltinViews() { - ViewManager viewManager = Stats.getViewManager(); - for (View view : BIGTABLE_BUILTIN_VIEWS) { - viewManager.registerView(view); - } - for (View view : NON_BIGTABLE_BUILTIN_VIEWS) { - viewManager.registerView(view); - } - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils.java deleted file mode 100644 index 8c84850f6a..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/ConsumerEnvironmentUtils.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2024 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import com.google.common.annotations.VisibleForTesting; -import io.opencensus.contrib.resource.util.CloudResource; -import io.opencensus.contrib.resource.util.ContainerResource; -import io.opencensus.contrib.resource.util.HostResource; -import io.opencensus.contrib.resource.util.ResourceUtils; -import io.opencensus.resource.Resource; -import java.util.Objects; - -/** A class for extracting details about consumer environments (GCE and GKE) for metrics. */ -class ConsumerEnvironmentUtils { - - private static ResourceUtilsWrapper resourceUtilsWrapper = new ResourceUtilsWrapper(); - - @VisibleForTesting - public static void setResourceUtilsWrapper(ResourceUtilsWrapper newResourceUtilsWrapper) { - resourceUtilsWrapper = newResourceUtilsWrapper; - } - - public static boolean isEnvGce() { - Resource resource = resourceUtilsWrapper.detectOpenCensusResource(); - return Objects.equals(resource.getType(), HostResource.TYPE) - && Objects.equals( - resource.getLabels().get(CloudResource.PROVIDER_KEY), CloudResource.PROVIDER_GCP); - } - - public static boolean isEnvGke() { - Resource resource = resourceUtilsWrapper.detectOpenCensusResource(); - return Objects.equals(resource.getType(), ContainerResource.TYPE) - && Objects.equals( - resource.getLabels().get(CloudResource.PROVIDER_KEY), CloudResource.PROVIDER_GCP); - } - - // We wrap the static ResourceUtils.detectResource() method in a non-static method for mocking. 
- @VisibleForTesting - public static class ResourceUtilsWrapper { - public Resource detectOpenCensusResource() { - return ResourceUtils.detectResource(); - } - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapper.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapper.java deleted file mode 100644 index 6bf0988b91..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapper.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import com.google.api.core.InternalApi; -import com.google.api.gax.tracing.ApiTracerFactory.OperationType; -import com.google.api.gax.tracing.SpanName; -import io.opencensus.stats.MeasureMap; -import io.opencensus.stats.StatsRecorder; -import io.opencensus.tags.TagContext; -import io.opencensus.tags.TagContextBuilder; -import io.opencensus.tags.TagKey; -import io.opencensus.tags.TagValue; -import io.opencensus.tags.Tagger; -import io.opencensus.tags.Tags; -import java.util.Map; - -/** A wrapper to record built-in metrics */ -@InternalApi("For internal use only") -public class StatsRecorderWrapper { - - private final OperationType operationType; - - private final Tagger tagger; - private final StatsRecorder statsRecorder; - private final TagContext parentContext; - private final SpanName spanName; - private final Map statsAttributes; - - private MeasureMap attemptMeasureMap; - private MeasureMap operationMeasureMap; - - public StatsRecorderWrapper( - OperationType operationType, - SpanName spanName, - Map statsAttributes, - StatsRecorder statsRecorder) { - this.operationType = operationType; - this.tagger = Tags.getTagger(); - this.statsRecorder = statsRecorder; - this.spanName = spanName; - this.parentContext = tagger.getCurrentTagContext(); - this.statsAttributes = statsAttributes; - - this.attemptMeasureMap = statsRecorder.newMeasureMap(); - this.operationMeasureMap = statsRecorder.newMeasureMap(); - } - - public void recordOperation(String status, String tableId, String zone, String cluster) { - TagContextBuilder tagCtx = - newTagContextBuilder(tableId, zone, cluster) - .putLocal(BuiltinMeasureConstants.STATUS, TagValue.create(status)); - - boolean isStreaming = operationType == OperationType.ServerStreaming; - tagCtx.putLocal( - BuiltinMeasureConstants.STREAMING, TagValue.create(Boolean.toString(isStreaming))); - - operationMeasureMap.record(tagCtx.build()); - // Reinitialize a new map - operationMeasureMap = statsRecorder.newMeasureMap(); - } - - public void recordAttempt(String status, String tableId, String zone, String cluster) { - TagContextBuilder tagCtx = - newTagContextBuilder(tableId, zone, cluster) - .putLocal(BuiltinMeasureConstants.STATUS, TagValue.create(status)); - - boolean isStreaming = operationType == OperationType.ServerStreaming; - tagCtx.putLocal( - 
BuiltinMeasureConstants.STREAMING, TagValue.create(Boolean.toString(isStreaming))); - - attemptMeasureMap.record(tagCtx.build()); - // Reinitialize a new map - attemptMeasureMap = statsRecorder.newMeasureMap(); - } - - public void putOperationLatencies(long operationLatency) { - operationMeasureMap.put(BuiltinMeasureConstants.OPERATION_LATENCIES, operationLatency); - } - - public void putAttemptLatencies(long attemptLatency) { - attemptMeasureMap.put(BuiltinMeasureConstants.ATTEMPT_LATENCIES, attemptLatency); - } - - public void putRetryCount(int attemptCount) { - operationMeasureMap.put(BuiltinMeasureConstants.RETRY_COUNT, attemptCount); - } - - public void putApplicationLatencies(long applicationLatency) { - operationMeasureMap.put(BuiltinMeasureConstants.APPLICATION_LATENCIES, applicationLatency); - } - - public void putFirstResponseLatencies(long firstResponseLatency) { - operationMeasureMap.put(BuiltinMeasureConstants.FIRST_RESPONSE_LATENCIES, firstResponseLatency); - } - - public void putGfeLatencies(long serverLatency) { - attemptMeasureMap.put(BuiltinMeasureConstants.SERVER_LATENCIES, serverLatency); - } - - public void putGfeMissingHeaders(long connectivityErrors) { - attemptMeasureMap.put(BuiltinMeasureConstants.CONNECTIVITY_ERROR_COUNT, connectivityErrors); - } - - public void putClientBlockingLatencies(long clientBlockingLatency) { - operationMeasureMap.put(BuiltinMeasureConstants.THROTTLING_LATENCIES, clientBlockingLatency); - } - - private TagContextBuilder newTagContextBuilder(String tableId, String zone, String cluster) { - TagContextBuilder tagContextBuilder = - tagger - .toBuilder(parentContext) - .putLocal(BuiltinMeasureConstants.METHOD, TagValue.create(spanName.toString())) - .putLocal(BuiltinMeasureConstants.TABLE, TagValue.create(tableId)) - .putLocal(BuiltinMeasureConstants.ZONE, TagValue.create(zone)) - .putLocal(BuiltinMeasureConstants.CLUSTER, TagValue.create(cluster)); - for (Map.Entry entry : statsAttributes.entrySet()) { - tagContextBuilder.putLocal(TagKey.create(entry.getKey()), TagValue.create(entry.getValue())); - } - return tagContextBuilder; - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperForConnection.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperForConnection.java deleted file mode 100644 index 3c335d28bc..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperForConnection.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2024 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigtable.stats; - -import com.google.api.core.InternalApi; -import io.opencensus.stats.MeasureMap; -import io.opencensus.stats.StatsRecorder; -import io.opencensus.tags.TagContext; -import io.opencensus.tags.TagContextBuilder; -import io.opencensus.tags.TagKey; -import io.opencensus.tags.TagValue; -import io.opencensus.tags.Tagger; -import io.opencensus.tags.Tags; -import java.util.Map; - -/** A wrapper to record built-in metrics for connection metrics not tied to operations/RPCs. */ -@InternalApi("For internal use only") -public class StatsRecorderWrapperForConnection { - private final StatsRecorder statsRecorder; - private final TagContext tagContext; - private MeasureMap perConnectionErrorCountMeasureMap; - - public StatsRecorderWrapperForConnection( - Map statsAttributes, StatsRecorder statsRecorder) { - this.statsRecorder = statsRecorder; - - this.perConnectionErrorCountMeasureMap = statsRecorder.newMeasureMap(); - - Tagger tagger = Tags.getTagger(); - TagContextBuilder tagContextBuilder = tagger.toBuilder(tagger.getCurrentTagContext()); - for (Map.Entry entry : statsAttributes.entrySet()) { - tagContextBuilder.putLocal(TagKey.create(entry.getKey()), TagValue.create(entry.getValue())); - } - this.tagContext = tagContextBuilder.build(); - } - - public void putAndRecordPerConnectionErrorCount(long errorCount) { - perConnectionErrorCountMeasureMap.put( - BuiltinMeasureConstants.PER_CONNECTION_ERROR_COUNT, errorCount); - - perConnectionErrorCountMeasureMap.record(tagContext); - perConnectionErrorCountMeasureMap = statsRecorder.newMeasureMap(); - } -} diff --git a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsWrapper.java b/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsWrapper.java deleted file mode 100644 index fc6a072d01..0000000000 --- a/google-cloud-bigtable-stats/src/main/java/com/google/cloud/bigtable/stats/StatsWrapper.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import static com.google.api.gax.tracing.ApiTracerFactory.OperationType; - -import com.google.api.core.InternalApi; -import com.google.api.gax.tracing.SpanName; -import io.opencensus.stats.Stats; -import io.opencensus.stats.View; -import io.opencensus.tags.TagKey; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -/** - * Wrapper class for accessing opencensus. We use a shaded version of opencensus to avoid polluting - * the global opencensus namespace. And this provides a facade that will not be relocated. 
- */ -@InternalApi("For internal use only") -public class StatsWrapper { - public static StatsRecorderWrapper createRecorder( - OperationType operationType, SpanName spanName, Map statsAttributes) { - return new StatsRecorderWrapper( - operationType, spanName, statsAttributes, Stats.getStatsRecorder()); - } - - public static StatsRecorderWrapperForConnection createRecorderForConnection( - Map statsAttributes) { - return new StatsRecorderWrapperForConnection(statsAttributes, Stats.getStatsRecorder()); - } - - // This is used in integration tests to get the tag value strings from view manager because Stats - // is relocated to com.google.bigtable.veneer.repackaged.io.opencensus. - @InternalApi("Visible for testing") - public static List getOperationLatencyViewTagValueStrings() { - return Stats.getViewManager().getView(BuiltinViewConstants.OPERATION_LATENCIES_VIEW.getName()) - .getAggregationMap().entrySet().stream() - .map(Map.Entry::getKey) - .flatMap(x -> x.stream()) - .map(x -> x.asString()) - .collect(Collectors.toCollection(ArrayList::new)); - } - - // A workaround to run ITBuiltinViewConstantsTest as integration test. Integration test runs after - // the packaging step. Opencensus classes will be relocated when they are packaged but the - // integration test files will not be. So the integration tests can't reference any transitive - // dependencies that have been relocated. - static Map> getBigtableViewToTagMap() { - Map> map = new HashMap<>(); - for (View view : BuiltinViews.BIGTABLE_BUILTIN_VIEWS) { - List tagKeys = view.getColumns(); - map.put( - view.getName().asString(), - tagKeys.stream().map(tagKey -> tagKey.getName()).collect(Collectors.toList())); - } - return map; - } -} diff --git a/google-cloud-bigtable-stats/src/main/resources/META-INF/license/apache2-LICENSE.txt b/google-cloud-bigtable-stats/src/main/resources/META-INF/license/apache2-LICENSE.txt deleted file mode 100644 index 261eeb9e9f..0000000000 --- a/google-cloud-bigtable-stats/src/main/resources/META-INF/license/apache2-LICENSE.txt +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporterTest.java b/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporterTest.java deleted file mode 100644 index e72b54f0bd..0000000000 --- a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/BigtableCreateTimeSeriesExporterTest.java +++ /dev/null @@ -1,321 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import static com.google.common.truth.Truth.assertThat; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -import com.google.api.MonitoredResource; -import com.google.api.gax.rpc.UnaryCallable; -import com.google.cloud.monitoring.v3.MetricServiceClient; -import com.google.cloud.monitoring.v3.stub.MetricServiceStub; -import com.google.common.collect.ImmutableMap; -import com.google.monitoring.v3.CreateTimeSeriesRequest; -import com.google.protobuf.Empty; -import io.opencensus.common.Timestamp; -import io.opencensus.contrib.resource.util.CloudResource; -import io.opencensus.contrib.resource.util.ContainerResource; -import io.opencensus.contrib.resource.util.HostResource; -import io.opencensus.metrics.LabelKey; -import io.opencensus.metrics.LabelValue; -import io.opencensus.metrics.export.Metric; -import io.opencensus.metrics.export.MetricDescriptor; -import io.opencensus.metrics.export.Point; -import io.opencensus.metrics.export.TimeSeries; -import io.opencensus.metrics.export.Value; -import io.opencensus.resource.Resource; -import java.util.Arrays; -import org.junit.After; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.mockito.ArgumentCaptor; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.junit.MockitoJUnit; -import org.mockito.junit.MockitoRule; - -@RunWith(JUnit4.class) -public class BigtableCreateTimeSeriesExporterTest { - - private static final String bigtableProjectId = "fake-bigtable-project"; - private static final String bigtableInstanceId = "fake-bigtable-instance"; - private static final String appProfileId = "default"; - private static final String tableId = "fake-table"; - private static final String bigtableZone = "us-east-1"; - private static final String bigtableCluster = "cluster-1"; - private static final String clientName = "client-name"; - private static final String gceProjectId = "fake-gce-project"; - private static final String gkeProjectId = "fake-gke-project"; - - @Rule public final MockitoRule mockitoRule = MockitoJUnit.rule(); - - @Mock private MetricServiceStub mockMetricServiceStub; - private MetricServiceClient fakeMetricServiceClient; - - @Before - public void setUp() { - - fakeMetricServiceClient = new FakeMetricServiceClient(mockMetricServiceStub); - } - - @After - public void tearDown() {} - - @Test - public void testTimeSeriesForMetricWithBigtableResource() { - BigtableCreateTimeSeriesExporter exporter = - new BigtableCreateTimeSeriesExporter(fakeMetricServiceClient, null); - ArgumentCaptor argumentCaptor = - ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); - - UnaryCallable mockCallable = mock(UnaryCallable.class); - when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); - when(mockCallable.call(argumentCaptor.capture())).thenReturn(Empty.getDefaultInstance()); - - double fakeValue = 10.0; - Metric fakeMetric = - Metric.create( - MetricDescriptor.create( - "bigtable/test", - "description", - "ms", - MetricDescriptor.Type.CUMULATIVE_DOUBLE, - Arrays.asList( - LabelKey.create(BuiltinMeasureConstants.PROJECT_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.INSTANCE_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.TABLE.getName(), ""), - 
LabelKey.create(BuiltinMeasureConstants.CLUSTER.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.ZONE.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.APP_PROFILE.getName(), ""))), - Arrays.asList( - TimeSeries.create( - Arrays.asList( - LabelValue.create(bigtableProjectId), - LabelValue.create(bigtableInstanceId), - LabelValue.create(tableId), - LabelValue.create(bigtableCluster), - LabelValue.create(bigtableZone), - LabelValue.create(appProfileId)), - Arrays.asList( - Point.create( - Value.doubleValue(fakeValue), - Timestamp.fromMillis(System.currentTimeMillis()))), - Timestamp.fromMillis(System.currentTimeMillis())))); - - exporter.export(Arrays.asList(fakeMetric)); - - CreateTimeSeriesRequest request = argumentCaptor.getValue(); - - assertThat(request.getName()).isEqualTo("projects/" + bigtableProjectId); - assertThat(request.getTimeSeriesList()).hasSize(1); - - com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0); - - assertThat(timeSeries.getResource().getLabelsMap()) - .containsExactly( - BuiltinMeasureConstants.PROJECT_ID.getName(), bigtableProjectId, - BuiltinMeasureConstants.INSTANCE_ID.getName(), bigtableInstanceId, - BuiltinMeasureConstants.TABLE.getName(), tableId, - BuiltinMeasureConstants.CLUSTER.getName(), bigtableCluster, - BuiltinMeasureConstants.ZONE.getName(), bigtableZone); - - assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.APP_PROFILE.getName(), appProfileId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsKey(BuiltinMeasureConstants.CLIENT_UID.getName()); - - assertThat(timeSeries.getPoints(0).getValue().getDoubleValue()).isEqualTo(fakeValue); - } - - @Test - public void testTimeSeriesForMetricWithGceResource() { - BigtableCreateTimeSeriesExporter exporter = - new BigtableCreateTimeSeriesExporter( - fakeMetricServiceClient, - MonitoredResource.newBuilder() - .setType(BigtableStackdriverExportUtils.GCE_RESOURCE_TYPE) - .putLabels(BigtableStackdriverExportUtils.GCE_OR_GKE_PROJECT_ID_KEY, gceProjectId) - .putLabels("another-gce-key", "another-gce-value") - .build()); - ArgumentCaptor argumentCaptor = - ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); - - UnaryCallable mockCallable = mock(UnaryCallable.class); - when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); - when(mockCallable.call(argumentCaptor.capture())).thenReturn(Empty.getDefaultInstance()); - - ConsumerEnvironmentUtils.ResourceUtilsWrapper resourceUtilsWrapperMock = - Mockito.mock(ConsumerEnvironmentUtils.ResourceUtilsWrapper.class); - ConsumerEnvironmentUtils.setResourceUtilsWrapper(resourceUtilsWrapperMock); - Mockito.when(resourceUtilsWrapperMock.detectOpenCensusResource()) - .thenReturn( - Resource.create( - HostResource.TYPE, - ImmutableMap.of(CloudResource.PROVIDER_KEY, CloudResource.PROVIDER_GCP))); - - double fakeValue = 10.0; - Metric fakeMetric = - Metric.create( - MetricDescriptor.create( - "bigtable.googleapis.com/internal/client/per_connection_error_count", - "description", - "ms", - MetricDescriptor.Type.CUMULATIVE_DOUBLE, - Arrays.asList( - LabelKey.create(BuiltinMeasureConstants.PROJECT_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.INSTANCE_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.APP_PROFILE.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.CLIENT_NAME.getName(), ""))), - Arrays.asList( - TimeSeries.create( - Arrays.asList( - 
LabelValue.create(bigtableProjectId), - LabelValue.create(bigtableInstanceId), - LabelValue.create(appProfileId), - LabelValue.create(clientName)), - Arrays.asList( - Point.create( - Value.doubleValue(fakeValue), - Timestamp.fromMillis(System.currentTimeMillis()))), - Timestamp.fromMillis(System.currentTimeMillis())))); - - exporter.export(Arrays.asList(fakeMetric)); - - CreateTimeSeriesRequest request = argumentCaptor.getValue(); - - assertThat(request.getName()).isEqualTo("projects/" + gceProjectId); - assertThat(request.getTimeSeriesList()).hasSize(1); - - com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0); - - assertThat(timeSeries.getResource().getLabelsMap()) - .containsExactly( - BigtableStackdriverExportUtils.GCE_OR_GKE_PROJECT_ID_KEY, - gceProjectId, - "another-gce-key", - "another-gce-value"); - - assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.PROJECT_ID.getName(), bigtableProjectId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.INSTANCE_ID.getName(), bigtableInstanceId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.APP_PROFILE.getName(), appProfileId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.CLIENT_NAME.getName(), clientName); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsKey(BuiltinMeasureConstants.CLIENT_UID.getName()); - - assertThat(timeSeries.getPoints(0).getValue().getDoubleValue()).isEqualTo(fakeValue); - } - - @Test - public void testTimeSeriesForMetricWithGkeResource() { - BigtableCreateTimeSeriesExporter exporter = - new BigtableCreateTimeSeriesExporter( - fakeMetricServiceClient, - MonitoredResource.newBuilder() - .setType(BigtableStackdriverExportUtils.GKE_RESOURCE_TYPE) - .putLabels(BigtableStackdriverExportUtils.GCE_OR_GKE_PROJECT_ID_KEY, gkeProjectId) - .putLabels("another-gke-key", "another-gke-value") - .build()); - ArgumentCaptor argumentCaptor = - ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); - - UnaryCallable mockCallable = mock(UnaryCallable.class); - when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); - when(mockCallable.call(argumentCaptor.capture())).thenReturn(Empty.getDefaultInstance()); - - ConsumerEnvironmentUtils.ResourceUtilsWrapper resourceUtilsWrapperMock = - Mockito.mock(ConsumerEnvironmentUtils.ResourceUtilsWrapper.class); - ConsumerEnvironmentUtils.setResourceUtilsWrapper(resourceUtilsWrapperMock); - - Mockito.when(resourceUtilsWrapperMock.detectOpenCensusResource()) - .thenReturn( - Resource.create( - ContainerResource.TYPE, - ImmutableMap.of(CloudResource.PROVIDER_KEY, CloudResource.PROVIDER_GCP))); - - double fakeValue = 10.0; - Metric fakeMetric = - Metric.create( - MetricDescriptor.create( - "bigtable.googleapis.com/internal/client/per_connection_error_count", - "description", - "ms", - MetricDescriptor.Type.CUMULATIVE_DOUBLE, - Arrays.asList( - LabelKey.create(BuiltinMeasureConstants.PROJECT_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.INSTANCE_ID.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.APP_PROFILE.getName(), ""), - LabelKey.create(BuiltinMeasureConstants.CLIENT_NAME.getName(), ""))), - Arrays.asList( - TimeSeries.create( - Arrays.asList( - LabelValue.create(bigtableProjectId), - LabelValue.create(bigtableInstanceId), - 
LabelValue.create(appProfileId), - LabelValue.create(clientName)), - Arrays.asList( - Point.create( - Value.doubleValue(fakeValue), - Timestamp.fromMillis(System.currentTimeMillis()))), - Timestamp.fromMillis(System.currentTimeMillis())))); - - exporter.export(Arrays.asList(fakeMetric)); - - CreateTimeSeriesRequest request = argumentCaptor.getValue(); - - assertThat(request.getName()).isEqualTo("projects/" + gkeProjectId); - assertThat(request.getTimeSeriesList()).hasSize(1); - - com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0); - - assertThat(timeSeries.getResource().getLabelsMap()) - .containsExactly( - BigtableStackdriverExportUtils.GCE_OR_GKE_PROJECT_ID_KEY, - gkeProjectId, - "another-gke-key", - "another-gke-value"); - - assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.PROJECT_ID.getName(), bigtableProjectId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.INSTANCE_ID.getName(), bigtableInstanceId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.APP_PROFILE.getName(), appProfileId); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsAtLeast(BuiltinMeasureConstants.CLIENT_NAME.getName(), clientName); - assertThat(timeSeries.getMetric().getLabelsMap()) - .containsKey(BuiltinMeasureConstants.CLIENT_UID.getName()); - - assertThat(timeSeries.getPoints(0).getValue().getDoubleValue()).isEqualTo(fakeValue); - } - - private class FakeMetricServiceClient extends MetricServiceClient { - - protected FakeMetricServiceClient(MetricServiceStub stub) { - super(stub); - } - } -} diff --git a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/ITBuiltinViewConstantsTest.java b/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/ITBuiltinViewConstantsTest.java deleted file mode 100644 index c2dcc2a602..0000000000 --- a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/ITBuiltinViewConstantsTest.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigtable.stats; - -import static com.google.common.truth.Truth.assertWithMessage; - -import java.util.List; -import java.util.Map; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) -public class ITBuiltinViewConstantsTest { - @Test - public void testBasicTagsExistForAllViews() { - Map> viewToTagMap = StatsWrapper.getBigtableViewToTagMap(); - for (String view : viewToTagMap.keySet()) { - assertWithMessage(view + " should have all basic tags") - .that(viewToTagMap.get(view)) - .containsAtLeast( - "project_id", "instance", "app_profile", "method", "zone", "cluster", "table"); - } - } -} diff --git a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperTest.java b/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperTest.java deleted file mode 100644 index 829202510c..0000000000 --- a/google-cloud-bigtable-stats/src/test/java/com/google/cloud/bigtable/stats/StatsRecorderWrapperTest.java +++ /dev/null @@ -1,513 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.stats; - -import static com.google.common.truth.Truth.assertThat; - -import com.google.api.gax.tracing.ApiTracerFactory; -import com.google.api.gax.tracing.SpanName; -import com.google.common.collect.ImmutableMap; -import io.opencensus.impl.stats.StatsComponentImpl; -import io.opencensus.stats.AggregationData; -import io.opencensus.stats.StatsComponent; -import io.opencensus.stats.View; -import io.opencensus.stats.ViewData; -import io.opencensus.stats.ViewManager; -import io.opencensus.tags.TagKey; -import io.opencensus.tags.TagValue; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -// Can only be run as a unit test. Opencensus classes will be relocated when they are packaged but -// the integration test files will not be. So the integration tests can't reference any transitive -// dependencies that have been relocated. To work around this, we'll have to move all the reference -// to opencensus to StatsWrapper. 
-@RunWith(JUnit4.class) -public class StatsRecorderWrapperTest { - - private final String PROJECT_ID = "fake-project"; - private final String INSTANCE_ID = "fake-instance"; - private final String APP_PROFILE_ID = "fake-app-profile"; - - private final String TABLE_ID = "fake-table-id"; - private final String ZONE = "fake-zone"; - private final String CLUSTER = "fake-cluster"; - private final String CLIENT_AND_VERSION = "bigtable-java/fake-version"; - - private final StatsComponent statsComponent = new StatsComponentImpl(); - - @Before - public void setup() { - BuiltinViews views = new BuiltinViews(); - views.registerPrivateViews(statsComponent.getViewManager()); - } - - @Test - public void testStreamingOperation() throws InterruptedException { - StatsRecorderWrapper recorderWrapper = - new StatsRecorderWrapper( - ApiTracerFactory.OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - ImmutableMap.of( - BuiltinMeasureConstants.PROJECT_ID.getName(), - PROJECT_ID, - BuiltinMeasureConstants.INSTANCE_ID.getName(), - INSTANCE_ID, - BuiltinMeasureConstants.APP_PROFILE.getName(), - APP_PROFILE_ID, - BuiltinMeasureConstants.CLIENT_NAME.getName(), - CLIENT_AND_VERSION), - statsComponent.getStatsRecorder()); - - long operationLatency = 1234; - int attemptCount = 2; - long attemptLatency = 56; - long serverLatency = 78; - long applicationLatency = 901; - long connectivityErrorCount = 15; - long throttlingLatency = 50; - long firstResponseLatency = 90; - - recorderWrapper.putOperationLatencies(operationLatency); - recorderWrapper.putRetryCount(attemptCount); - recorderWrapper.putAttemptLatencies(attemptLatency); - recorderWrapper.putApplicationLatencies(applicationLatency); - recorderWrapper.putGfeLatencies(serverLatency); - recorderWrapper.putGfeMissingHeaders(connectivityErrorCount); - recorderWrapper.putFirstResponseLatencies(firstResponseLatency); - recorderWrapper.putClientBlockingLatencies(throttlingLatency); - - recorderWrapper.recordOperation("OK", TABLE_ID, ZONE, CLUSTER); - recorderWrapper.recordAttempt("OK", TABLE_ID, ZONE, CLUSTER); - - Thread.sleep(100); - - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.OPERATION_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, "OK", - BuiltinMeasureConstants.TABLE, TABLE_ID, - BuiltinMeasureConstants.ZONE, ZONE, - BuiltinMeasureConstants.CLUSTER, CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, "true"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(operationLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.ATTEMPT_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "true"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(attemptLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.RETRY_COUNT_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, 
- BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(attemptCount); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.SERVER_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "true", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(serverLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.APPLICATION_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "true"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(applicationLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.CONNECTIVITY_ERROR_COUNT_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(connectivityErrorCount); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.THROTTLING_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, "Bigtable.ReadRows", - BuiltinMeasureConstants.TABLE, TABLE_ID, - BuiltinMeasureConstants.ZONE, ZONE, - BuiltinMeasureConstants.CLUSTER, CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(throttlingLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.FIRST_RESPONSE_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.ReadRows", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.STATUS, - "OK", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(firstResponseLatency); - } - - @Test - public void testUnaryOperations() throws InterruptedException { - StatsRecorderWrapper recorderWrapper = - new StatsRecorderWrapper( - ApiTracerFactory.OperationType.Unary, - SpanName.of("Bigtable", "MutateRow"), - ImmutableMap.of( - BuiltinMeasureConstants.PROJECT_ID.getName(), PROJECT_ID, - BuiltinMeasureConstants.INSTANCE_ID.getName(), INSTANCE_ID, - BuiltinMeasureConstants.APP_PROFILE.getName(), APP_PROFILE_ID, - BuiltinMeasureConstants.CLIENT_NAME.getName(), CLIENT_AND_VERSION), - statsComponent.getStatsRecorder()); - - long operationLatency = 1234; - int attemptCount = 2; - long attemptLatency = 56; - long serverLatency = 78; - long applicationLatency = 901; - long connectivityErrorCount = 15; - 
long throttlingLatency = 50; - long firstResponseLatency = 90; - - recorderWrapper.putOperationLatencies(operationLatency); - recorderWrapper.putRetryCount(attemptCount); - recorderWrapper.putAttemptLatencies(attemptLatency); - recorderWrapper.putApplicationLatencies(applicationLatency); - recorderWrapper.putGfeLatencies(serverLatency); - recorderWrapper.putGfeMissingHeaders(connectivityErrorCount); - recorderWrapper.putFirstResponseLatencies(firstResponseLatency); - recorderWrapper.putClientBlockingLatencies(throttlingLatency); - - recorderWrapper.recordOperation("UNAVAILABLE", TABLE_ID, ZONE, CLUSTER); - recorderWrapper.recordAttempt("UNAVAILABLE", TABLE_ID, ZONE, CLUSTER); - - Thread.sleep(100); - - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.OPERATION_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "false"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(operationLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.ATTEMPT_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "false"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(attemptLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.RETRY_COUNT_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(attemptCount); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.SERVER_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "false", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(serverLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.APPLICATION_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.STREAMING, - "false"), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(applicationLatency); - assertThat( - 
getAggregationValueAsLong( - BuiltinViewConstants.CONNECTIVITY_ERROR_COUNT_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION, - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(connectivityErrorCount); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.THROTTLING_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, "Bigtable.MutateRow", - BuiltinMeasureConstants.TABLE, TABLE_ID, - BuiltinMeasureConstants.ZONE, ZONE, - BuiltinMeasureConstants.CLUSTER, CLUSTER, - BuiltinMeasureConstants.CLIENT_NAME, CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(throttlingLatency); - assertThat( - getAggregationValueAsLong( - BuiltinViewConstants.FIRST_RESPONSE_LATENCIES_VIEW, - ImmutableMap.of( - BuiltinMeasureConstants.METHOD, - "Bigtable.MutateRow", - BuiltinMeasureConstants.TABLE, - TABLE_ID, - BuiltinMeasureConstants.ZONE, - ZONE, - BuiltinMeasureConstants.CLUSTER, - CLUSTER, - BuiltinMeasureConstants.STATUS, - "UNAVAILABLE", - BuiltinMeasureConstants.CLIENT_NAME, - CLIENT_AND_VERSION), - PROJECT_ID, - INSTANCE_ID, - APP_PROFILE_ID, - statsComponent.getViewManager())) - .isEqualTo(firstResponseLatency); - } - - long getAggregationValueAsLong( - View view, - ImmutableMap tags, - String projectId, - String instanceId, - String appProfileId, - ViewManager viewManager) { - ViewData viewData = viewManager.getView(view.getName()); - Map, AggregationData> aggregationMap = - Objects.requireNonNull(viewData).getAggregationMap(); - - List tagValues = new ArrayList<>(); - - for (TagKey column : view.getColumns()) { - if (BuiltinMeasureConstants.PROJECT_ID == column) { - tagValues.add(TagValue.create(projectId)); - } else if (BuiltinMeasureConstants.INSTANCE_ID == column) { - tagValues.add(TagValue.create(instanceId)); - } else if (BuiltinMeasureConstants.APP_PROFILE == column) { - tagValues.add(TagValue.create(appProfileId)); - } else { - tagValues.add(TagValue.create(tags.get(column))); - } - } - - AggregationData aggregationData = aggregationMap.get(tagValues); - - return aggregationData.match( - arg -> (long) arg.getSum(), - AggregationData.SumDataLong::getSum, - arg -> arg.getCount(), - arg -> (long) arg.getMean(), - arg -> (long) arg.getLastValue(), - AggregationData.LastValueDataLong::getLastValue, - arg -> { - throw new UnsupportedOperationException(); - }); - } -} diff --git a/google-cloud-bigtable/clirr-ignored-differences.xml b/google-cloud-bigtable/clirr-ignored-differences.xml index 7ac7946561..034168c2a1 100644 --- a/google-cloud-bigtable/clirr-ignored-differences.xml +++ b/google-cloud-bigtable/clirr-ignored-differences.xml @@ -163,6 +163,12 @@ 8001 com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerBatchedUnaryCallable + + + 7004 + com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory + * + 6001 @@ -188,6 +194,11 @@ * + + 7004 + com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker + * + 7012 com/google/cloud/bigtable/data/v2/models/MutationApi diff --git a/google-cloud-bigtable/pom.xml b/google-cloud-bigtable/pom.xml index 43619f3cf9..3783a404c4 100644 --- a/google-cloud-bigtable/pom.xml +++ b/google-cloud-bigtable/pom.xml 
@@ -2,7 +2,7 @@ 4.0.0 google-cloud-bigtable - 2.37.0 + 2.38.0 jar Google Cloud Bigtable https://github.com/googleapis/java-bigtable @@ -12,11 +12,11 @@ com.google.cloud google-cloud-bigtable-parent - 2.37.0 + 2.38.0 - 2.37.0 + 2.38.0 google-cloud-bigtable @@ -47,14 +47,14 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.37.0 + 2.38.0 pom import com.google.cloud google-cloud-bigtable-bom - 2.37.0 + 2.38.0 pom import @@ -64,19 +64,6 @@ - - com.google.cloud - google-cloud-bigtable-stats - - - - io.opencensus - * - - - com.google.api @@ -229,6 +216,41 @@ threetenbp + + + io.opentelemetry + opentelemetry-api + + + io.opentelemetry + opentelemetry-sdk + + + io.opentelemetry + opentelemetry-sdk-metrics + + + io.opentelemetry + opentelemetry-sdk-common + + + com.google.cloud.opentelemetry + detector-resources-support + + + io.opentelemetry + opentelemetry-sdk-testing + test + + + com.google.cloud + google-cloud-monitoring + + + com.google.api.grpc + proto-google-cloud-monitoring-v3 + + com.google.api @@ -272,23 +294,6 @@ - - com.google.cloud - google-cloud-monitoring - - - - io.perfmark - perfmark-api - - - runtime - - - com.google.api.grpc - proto-google-cloud-monitoring-v3 - runtime - com.google.truth truth diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java index e9befe0974..257406eb45 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java @@ -20,6 +20,6 @@ @InternalApi("For internal use only") public final class Version { // {x-version-update-start:google-cloud-bigtable:current} - public static String VERSION = "2.37.0"; + public static String VERSION = "2.38.0"; // {x-version-update-end} } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java index c35500a189..9b2f2e345f 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java @@ -19,7 +19,10 @@ import com.google.api.gax.core.BackgroundResource; import com.google.api.gax.rpc.ClientContext; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; +import io.opentelemetry.api.OpenTelemetry; import java.io.IOException; +import java.util.logging.Level; +import java.util.logging.Logger; import javax.annotation.Nonnull; /** @@ -62,8 +65,12 @@ */ @BetaApi("This feature is currently experimental and can change in the future") public final class BigtableDataClientFactory implements AutoCloseable { + + private static final Logger logger = Logger.getLogger(BigtableDataClientFactory.class.getName()); + private final BigtableDataSettings defaultSettings; private final ClientContext sharedClientContext; + private final OpenTelemetry openTelemetry; /** * Create a instance of this factory. 
@@ -75,13 +82,28 @@ public static BigtableDataClientFactory create(BigtableDataSettings defaultSetti throws IOException { ClientContext sharedClientContext = EnhancedBigtableStub.createClientContext(defaultSettings.getStubSettings()); - return new BigtableDataClientFactory(sharedClientContext, defaultSettings); + OpenTelemetry openTelemetry = null; + try { + // We don't want client side metrics to crash the client, so catch any exception when getting + // the OTEL instance and log the exception instead. + openTelemetry = + EnhancedBigtableStub.getOpenTelemetry( + defaultSettings.getProjectId(), + defaultSettings.getMetricsProvider(), + sharedClientContext.getCredentials()); + } catch (Throwable t) { + logger.log(Level.WARNING, "Failed to get OTEL, will skip exporting client side metrics", t); + } + return new BigtableDataClientFactory(sharedClientContext, defaultSettings, openTelemetry); } private BigtableDataClientFactory( - ClientContext sharedClientContext, BigtableDataSettings defaultSettings) { + ClientContext sharedClientContext, + BigtableDataSettings defaultSettings, + OpenTelemetry openTelemetry) { this.sharedClientContext = sharedClientContext; this.defaultSettings = defaultSettings; + this.openTelemetry = openTelemetry; } /** @@ -112,7 +134,7 @@ public BigtableDataClient createDefault() { .toBuilder() .setTracerFactory( EnhancedBigtableStub.createBigtableTracerFactory( - defaultSettings.getStubSettings())) + defaultSettings.getStubSettings(), openTelemetry)) .build(); return BigtableDataClient.createWithClientContext(defaultSettings, clientContext); @@ -140,7 +162,8 @@ public BigtableDataClient createForAppProfile(@Nonnull String appProfileId) thro sharedClientContext .toBuilder() .setTracerFactory( - EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings())) + EnhancedBigtableStub.createBigtableTracerFactory( + settings.getStubSettings(), openTelemetry)) .build(); return BigtableDataClient.createWithClientContext(settings, clientContext); } @@ -168,7 +191,8 @@ public BigtableDataClient createForInstance(@Nonnull String projectId, @Nonnull sharedClientContext .toBuilder() .setTracerFactory( - EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings())) + EnhancedBigtableStub.createBigtableTracerFactory( + settings.getStubSettings(), openTelemetry)) .build(); return BigtableDataClient.createWithClientContext(settings, clientContext); @@ -197,7 +221,8 @@ public BigtableDataClient createForInstance( sharedClientContext .toBuilder() .setTracerFactory( - EnhancedBigtableStub.createBigtableTracerFactory(settings.getStubSettings())) + EnhancedBigtableStub.createBigtableTracerFactory( + settings.getStubSettings(), openTelemetry)) .build(); return BigtableDataClient.createWithClientContext(settings, clientContext); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java index 701a5e8e49..928159aa6d 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java @@ -25,19 +25,16 @@ import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; import com.google.api.gax.rpc.UnaryCallSettings; import com.google.auth.Credentials; -import com.google.auth.oauth2.GoogleCredentials; import com.google.cloud.bigtable.data.v2.models.Query; import 
com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.stub.BigtableBatchingCallSettings; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings; -import com.google.cloud.bigtable.stats.BigtableStackdriverStatsExporter; -import com.google.cloud.bigtable.stats.BuiltinViews; +import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider; import com.google.common.base.MoreObjects; import com.google.common.base.Strings; import io.grpc.ManagedChannelBuilder; import java.io.IOException; import java.util.List; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.logging.Logger; import javax.annotation.Nonnull; import javax.annotation.Nullable; @@ -77,7 +74,10 @@ public final class BigtableDataSettings { private static final Logger LOGGER = Logger.getLogger(BigtableDataSettings.class.getName()); private static final String BIGTABLE_EMULATOR_HOST_ENV_VAR = "BIGTABLE_EMULATOR_HOST"; - private static final AtomicBoolean BUILTIN_METRICS_REGISTERED = new AtomicBoolean(false); + // This is the legacy credential override used in the deprecated enableBuiltinMetrics method to + // override the default credentials set on the Bigtable client. Keeping it for backward + // compatibility. + @Deprecated @Nullable private static Credentials legacyMetricCredentialOverride; private final EnhancedBigtableStubSettings stubSettings; @@ -197,23 +197,34 @@ public static void enableGfeOpenCensusStats() { com.google.cloud.bigtable.data.v2.stub.metrics.RpcViews.registerBigtableClientGfeViews(); } - /** Register built in metrics. */ - public static void enableBuiltinMetrics() throws IOException { - if (BUILTIN_METRICS_REGISTERED.compareAndSet(false, true)) { - BuiltinViews.registerBigtableBuiltinViews(); - BigtableStackdriverStatsExporter.register(GoogleCredentials.getApplicationDefault()); - } - } + /** + * Register built in metrics. + * + * @deprecated This is a no-op that doesn't do anything. Builtin metrics are enabled by default + * now. Please refer to {@link + * BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)} on how to enable or + * disable built-in metrics. + */ + @Deprecated + public static void enableBuiltinMetrics() throws IOException {} /** * Register built in metrics with credentials. The credentials need to have metric write access * for all the projects you're publishing to. + * + * @deprecated This is a no-op that doesn't do anything. Builtin metrics are enabled by default + * now. Please refer {@link BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)} + * on how to enable or disable built-in metrics. */ + @Deprecated public static void enableBuiltinMetrics(Credentials credentials) throws IOException { - if (BUILTIN_METRICS_REGISTERED.compareAndSet(false, true)) { - BuiltinViews.registerBigtableBuiltinViews(); - BigtableStackdriverStatsExporter.register(credentials); - } + BigtableDataSettings.legacyMetricCredentialOverride = credentials; + } + + /** Get the metrics credentials if it's set by {@link #enableBuiltinMetrics(Credentials)}. */ + @InternalApi + public static Credentials getMetricsCredentials() { + return legacyMetricCredentialOverride; } /** Returns the target project id. */ @@ -278,6 +289,11 @@ public boolean isBulkMutationFlowControlEnabled() { return stubSettings.bulkMutateRowsSettings().isServerInitiatedFlowControlEnabled(); } + /** Gets the {@link MetricsProvider}. 
* */ + public MetricsProvider getMetricsProvider() { + return stubSettings.getMetricsProvider(); + } + /** Returns the underlying RPC settings. */ public EnhancedBigtableStubSettings getStubSettings() { return stubSettings; @@ -527,6 +543,30 @@ public boolean isBulkMutationFlowControlEnabled() { return stubSettings.bulkMutateRowsSettings().isServerInitiatedFlowControlEnabled(); } + /** + * Sets the {@link MetricsProvider}. + * + *
+     * <p>By default, this is set to {@link
+     * com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider#INSTANCE} which will
+     * collect and export client side metrics.
+     *
+     * <p>To disable client side metrics, set it to {@link
+     * com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider#INSTANCE}.
+     *
+     * <p>
To use a custom OpenTelemetry instance, refer to {@link + * com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider} on how to + * set it up. + */ + public Builder setMetricsProvider(MetricsProvider metricsProvider) { + stubSettings.setMetricsProvider(metricsProvider); + return this; + } + + /** Gets the {@link MetricsProvider}. */ + public MetricsProvider getMetricsProvider() { + return stubSettings.getMetricsProvider(); + } + /** * Returns the underlying settings for making RPC calls. The settings should be changed with * care. diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java index ec15c4131a..57d9748cca 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java @@ -15,6 +15,11 @@ */ package com.google.cloud.bigtable.data.v2.stub; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APP_PROFILE_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY; + import com.google.api.core.ApiFunction; import com.google.api.core.BetaApi; import com.google.api.core.InternalApi; @@ -68,6 +73,7 @@ import com.google.bigtable.v2.RowRange; import com.google.bigtable.v2.SampleRowKeysResponse; import com.google.cloud.bigtable.Version; +import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.internal.JwtCredentialsWithAudience; import com.google.cloud.bigtable.data.v2.internal.NameUtil; import com.google.cloud.bigtable.data.v2.internal.RequestContext; @@ -97,8 +103,12 @@ import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracerUnaryCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTracerFactory; import com.google.cloud.bigtable.data.v2.stub.metrics.CompositeTracerFactory; +import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider; +import com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider; import com.google.cloud.bigtable.data.v2.stub.metrics.ErrorCountPerConnectionMetricTracker; +import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider; import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsTracerFactory; +import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider; import com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants; import com.google.cloud.bigtable.data.v2.stub.metrics.StatsHeadersServerStreamingCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.StatsHeadersUnaryCallable; @@ -130,6 +140,8 @@ import io.opencensus.tags.TagValue; import io.opencensus.tags.Tagger; import io.opencensus.tags.Tags; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -137,6 +149,8 @@ import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.logging.Level; +import java.util.logging.Logger; import 
javax.annotation.Nonnull; import javax.annotation.Nullable; @@ -154,6 +168,9 @@ */ @InternalApi public class EnhancedBigtableStub implements AutoCloseable { + + private static final Logger logger = Logger.getLogger(EnhancedBigtableStub.class.getName()); + private static final String CLIENT_NAME = "Bigtable"; private static final long FLOW_CONTROL_ADJUSTING_INTERVAL_MS = TimeUnit.SECONDS.toMillis(20); private final EnhancedBigtableStubSettings settings; @@ -185,10 +202,25 @@ public class EnhancedBigtableStub implements AutoCloseable { public static EnhancedBigtableStub create(EnhancedBigtableStubSettings settings) throws IOException { - settings = settings.toBuilder().setTracerFactory(createBigtableTracerFactory(settings)).build(); ClientContext clientContext = createClientContext(settings); - - return new EnhancedBigtableStub(settings, clientContext); + OpenTelemetry openTelemetry = null; + try { + // We don't want client side metrics to crash the client, so catch any exception when getting + // the OTEL instance and log the exception instead. + openTelemetry = + getOpenTelemetry( + settings.getProjectId(), + settings.getMetricsProvider(), + clientContext.getCredentials()); + } catch (Throwable t) { + logger.log(Level.WARNING, "Failed to get OTEL, will skip exporting client side metrics", t); + } + ClientContext contextWithTracer = + clientContext + .toBuilder() + .setTracerFactory(createBigtableTracerFactory(settings, openTelemetry)) + .build(); + return new EnhancedBigtableStub(settings, contextWithTracer); } public static EnhancedBigtableStub createWithClientContext( @@ -207,15 +239,33 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set // workaround JWT audience issues patchCredentials(builder); + // Fix the credentials so that they can be shared + Credentials credentials = null; + if (builder.getCredentialsProvider() != null) { + credentials = builder.getCredentialsProvider().getCredentials(); + } + builder.setCredentialsProvider(FixedCredentialsProvider.create(credentials)); + InstantiatingGrpcChannelProvider.Builder transportProvider = builder.getTransportChannelProvider() instanceof InstantiatingGrpcChannelProvider ? ((InstantiatingGrpcChannelProvider) builder.getTransportChannelProvider()).toBuilder() : null; + OpenTelemetry openTelemetry = null; + try { + // We don't want client side metrics to crash the client, so catch any exception when getting + // the OTEL instance and log the exception instead. 
+ openTelemetry = + getOpenTelemetry(settings.getProjectId(), settings.getMetricsProvider(), credentials); + } catch (Throwable t) { + logger.log(Level.WARNING, "Failed to get OTEL, will skip exporting client side metrics", t); + } ErrorCountPerConnectionMetricTracker errorCountPerConnectionMetricTracker; - if (transportProvider != null) { + // Skip setting up ErrorCountPerConnectionMetricTracker if openTelemetry is null + if (openTelemetry != null && transportProvider != null) { errorCountPerConnectionMetricTracker = - new ErrorCountPerConnectionMetricTracker(createBuiltinAttributes(builder)); + new ErrorCountPerConnectionMetricTracker( + openTelemetry, createBuiltinAttributes(settings)); ApiFunction oldChannelConfigurator = transportProvider.getChannelConfigurator(); transportProvider.setChannelConfigurator( @@ -237,12 +287,6 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set // Inject channel priming if (settings.isRefreshingChannel()) { - // Fix the credentials so that they can be shared - Credentials credentials = null; - if (builder.getCredentialsProvider() != null) { - credentials = builder.getCredentialsProvider().getCredentials(); - } - builder.setCredentialsProvider(FixedCredentialsProvider.create(credentials)); if (transportProvider != null) { transportProvider.setChannelPrimer( @@ -267,13 +311,19 @@ public static ClientContext createClientContext(EnhancedBigtableStubSettings set } public static ApiTracerFactory createBigtableTracerFactory( - EnhancedBigtableStubSettings settings) { - return createBigtableTracerFactory(settings, Tags.getTagger(), Stats.getStatsRecorder()); + EnhancedBigtableStubSettings settings, @Nullable OpenTelemetry openTelemetry) + throws IOException { + return createBigtableTracerFactory( + settings, Tags.getTagger(), Stats.getStatsRecorder(), openTelemetry); } @VisibleForTesting public static ApiTracerFactory createBigtableTracerFactory( - EnhancedBigtableStubSettings settings, Tagger tagger, StatsRecorder stats) { + EnhancedBigtableStubSettings settings, + Tagger tagger, + StatsRecorder stats, + @Nullable OpenTelemetry openTelemetry) + throws IOException { String projectId = settings.getProjectId(); String instanceId = settings.getInstanceId(); String appProfileId = settings.getAppProfileId(); @@ -284,10 +334,10 @@ public static ApiTracerFactory createBigtableTracerFactory( .put(RpcMeasureConstants.BIGTABLE_INSTANCE_ID, TagValue.create(instanceId)) .put(RpcMeasureConstants.BIGTABLE_APP_PROFILE_ID, TagValue.create(appProfileId)) .build(); - ImmutableMap builtinAttributes = createBuiltinAttributes(settings.toBuilder()); - return new CompositeTracerFactory( - ImmutableList.of( + ImmutableList.Builder tracerFactories = ImmutableList.builder(); + tracerFactories + .add( // Add OpenCensus Tracing new OpencensusTracerFactory( ImmutableMap.builder() @@ -299,22 +349,52 @@ public static ApiTracerFactory createBigtableTracerFactory( .put("gax", GaxGrpcProperties.getGaxGrpcVersion()) .put("grpc", GaxGrpcProperties.getGrpcVersion()) .put("gapic", Version.VERSION) - .build()), - // Add OpenCensus Metrics - MetricsTracerFactory.create(tagger, stats, attributes), - BuiltinMetricsTracerFactory.create(builtinAttributes), - // Add user configured tracer - settings.getTracerFactory())); + .build())) + // Add OpenCensus Metrics + .add(MetricsTracerFactory.create(tagger, stats, attributes)) + // Add user configured tracer + .add(settings.getTracerFactory()); + BuiltinMetricsTracerFactory builtinMetricsTracerFactory = + openTelemetry != null + 
? BuiltinMetricsTracerFactory.create(openTelemetry, createBuiltinAttributes(settings)) + : null; + if (builtinMetricsTracerFactory != null) { + tracerFactories.add(builtinMetricsTracerFactory); + } + return new CompositeTracerFactory(tracerFactories.build()); + } + + @Nullable + public static OpenTelemetry getOpenTelemetry( + String projectId, MetricsProvider metricsProvider, @Nullable Credentials defaultCredentials) + throws IOException { + if (metricsProvider instanceof CustomOpenTelemetryMetricsProvider) { + CustomOpenTelemetryMetricsProvider customMetricsProvider = + (CustomOpenTelemetryMetricsProvider) metricsProvider; + return customMetricsProvider.getOpenTelemetry(); + } else if (metricsProvider instanceof DefaultMetricsProvider) { + Credentials credentials = + BigtableDataSettings.getMetricsCredentials() != null + ? BigtableDataSettings.getMetricsCredentials() + : defaultCredentials; + DefaultMetricsProvider defaultMetricsProvider = (DefaultMetricsProvider) metricsProvider; + return defaultMetricsProvider.getOpenTelemetry(projectId, credentials); + } else if (metricsProvider instanceof NoopMetricsProvider) { + return null; + } + throw new IOException("Invalid MetricsProvider type " + metricsProvider); } - private static ImmutableMap createBuiltinAttributes( - EnhancedBigtableStubSettings.Builder builder) { - return ImmutableMap.builder() - .put("project_id", builder.getProjectId()) - .put("instance", builder.getInstanceId()) - .put("app_profile", builder.getAppProfileId()) - .put("client_name", "bigtable-java/" + Version.VERSION) - .build(); + private static Attributes createBuiltinAttributes(EnhancedBigtableStubSettings settings) { + return Attributes.of( + BIGTABLE_PROJECT_ID_KEY, + settings.getProjectId(), + INSTANCE_ID_KEY, + settings.getInstanceId(), + APP_PROFILE_KEY, + settings.getAppProfileId(), + CLIENT_NAME_KEY, + "bigtable-java/" + Version.VERSION); } private static void patchCredentials(EnhancedBigtableStubSettings.Builder settings) diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java index 9a5027c740..f07a8fb7fc 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java @@ -44,6 +44,8 @@ import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider; +import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider; import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor; import com.google.cloud.bigtable.data.v2.stub.readrows.ReadRowsBatchingDescriptor; import com.google.common.base.MoreObjects; @@ -229,6 +231,8 @@ public class EnhancedBigtableStubSettings extends StubSettings getJwtAudienceMapping() { return jwtAudienceMapping; } + public MetricsProvider getMetricsProvider() { + return metricsProvider; + } + /** * Gets if routing cookie is enabled. If true, client will retry a request with extra metadata * server sent back. @@ -636,6 +645,8 @@ public static class Builder extends StubSettings.Builder jwtAudienceMapping) { return this; } + /** + * Sets the {@link MetricsProvider}. + * + *
+     * <p>By default, this is set to {@link
+     * com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider#INSTANCE} which will
+     * collect and export client side metrics.
+     *
+     * <p>To disable client side metrics, set it to {@link
+     * com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider#INSTANCE}.
+     *
+     * <p>
To use a custom OpenTelemetry instance, refer to {@link + * com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider} on how to + * set it up. + */ + public Builder setMetricsProvider(MetricsProvider metricsProvider) { + this.metricsProvider = Preconditions.checkNotNull(metricsProvider); + return this; + } + + /** Gets the {@link MetricsProvider}. */ + public MetricsProvider getMetricsProvider() { + return this.metricsProvider; + } + @InternalApi("Used for internal testing") public Map getJwtAudienceMapping() { return jwtAudienceMapping; @@ -1028,6 +1067,11 @@ public EnhancedBigtableStubSettings build() { featureFlags.setRoutingCookie(this.getEnableRoutingCookie()); featureFlags.setRetryInfo(this.getEnableRetryInfo()); + // client_Side_metrics_enabled feature flag is only set when a user is running with a + // DefaultMetricsProvider. This may cause false negatives when a user registered the + // metrics on their CustomOpenTelemetryMetricsProvider. + featureFlags.setClientSideMetricsEnabled( + this.getMetricsProvider() instanceof DefaultMetricsProvider); // Serialize the web64 encode the bigtable feature flags ByteArrayOutputStream boas = new ByteArrayOutputStream(); @@ -1080,6 +1124,7 @@ public String toString() { generateInitialChangeStreamPartitionsSettings) .add("readChangeStreamSettings", readChangeStreamSettings) .add("pingAndWarmSettings", pingAndWarmSettings) + .add("metricsProvider", metricsProvider) .add("parent", super.toString()) .toString(); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java index 6208fce89e..97cc2f73ec 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java @@ -86,7 +86,7 @@ public void call( stopwatch.stop(); if (context.getTracer() instanceof BigtableTracer) { ((BigtableTracer) context.getTracer()) - .batchRequestThrottled(stopwatch.elapsed(TimeUnit.MILLISECONDS)); + .batchRequestThrottled(stopwatch.elapsed(TimeUnit.NANOSECONDS)); } RateLimitingResponseObserver innerObserver = new RateLimitingResponseObserver(limiter, lastQpsChangeTime, responseObserver); diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java new file mode 100644 index 0000000000..81473ae4d4 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java @@ -0,0 +1,364 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.FIRST_RESPONSE_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME; + +import com.google.api.MonitoredResource; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.core.InternalApi; +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.auth.Credentials; +import com.google.cloud.monitoring.v3.MetricServiceClient; +import com.google.cloud.monitoring.v3.MetricServiceSettings; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.monitoring.v3.CreateTimeSeriesRequest; +import com.google.monitoring.v3.ProjectName; +import com.google.monitoring.v3.TimeSeries; +import com.google.protobuf.Empty; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import org.threeten.bp.Duration; + +/** + * Bigtable Cloud Monitoring OpenTelemetry Exporter. + * + *
The exporter will look for all bigtable owned metrics under bigtable.googleapis.com + * instrumentation scope and upload it via the Google Cloud Monitoring API. + */ +@InternalApi +public final class BigtableCloudMonitoringExporter implements MetricExporter { + + private static final Logger logger = + Logger.getLogger(BigtableCloudMonitoringExporter.class.getName()); + + // This system property can be used to override the monitoring endpoint + // to a different environment. It's meant for internal testing only. + private static final String MONITORING_ENDPOINT = + MoreObjects.firstNonNull( + System.getProperty("bigtable.test-monitoring-endpoint"), + MetricServiceSettings.getDefaultEndpoint()); + + private static final String APPLICATION_RESOURCE_PROJECT_ID = "project_id"; + + private final MetricServiceClient client; + + private final String bigtableProjectId; + private final String taskId; + + // The resource the client application is running on + private final MonitoredResource applicationResource; + + private final AtomicBoolean isShutdown = new AtomicBoolean(false); + + private CompletableResultCode lastExportCode; + + private static final ImmutableList BIGTABLE_TABLE_METRICS = + ImmutableSet.of( + OPERATION_LATENCIES_NAME, + ATTEMPT_LATENCIES_NAME, + SERVER_LATENCIES_NAME, + FIRST_RESPONSE_LATENCIES_NAME, + CLIENT_BLOCKING_LATENCIES_NAME, + APPLICATION_BLOCKING_LATENCIES_NAME, + RETRY_COUNT_NAME, + CONNECTIVITY_ERROR_COUNT_NAME) + .stream() + .map(m -> METER_NAME + m) + .collect(ImmutableList.toImmutableList()); + + private static final ImmutableList APPLICATION_METRICS = + ImmutableSet.of(PER_CONNECTION_ERROR_COUNT_NAME).stream() + .map(m -> METER_NAME + m) + .collect(ImmutableList.toImmutableList()); + + public static BigtableCloudMonitoringExporter create( + String projectId, @Nullable Credentials credentials) throws IOException { + MetricServiceSettings.Builder settingsBuilder = MetricServiceSettings.newBuilder(); + CredentialsProvider credentialsProvider = + Optional.ofNullable(credentials) + .map(FixedCredentialsProvider::create) + .orElse(NoCredentialsProvider.create()); + settingsBuilder.setCredentialsProvider(credentialsProvider); + settingsBuilder.setEndpoint(MONITORING_ENDPOINT); + + org.threeten.bp.Duration timeout = Duration.ofMinutes(1); + // TODO: createServiceTimeSeries needs special handling if the request failed. Leaving + // it as not retried for now. + settingsBuilder.createServiceTimeSeriesSettings().setSimpleTimeoutNoRetries(timeout); + + // Detect the resource that the client application is running on. For example, + // this could be a GCE instance or a GKE pod. Currently, we only support GCE instance and + // GKE pod. This method will return null for everything else. 
+ MonitoredResource applicationResource = null; + try { + applicationResource = BigtableExporterUtils.detectResource(); + } catch (Exception e) { + logger.log( + Level.WARNING, + "Failed to detect resource, will skip exporting application level metrics ", + e); + } + + return new BigtableCloudMonitoringExporter( + projectId, + MetricServiceClient.create(settingsBuilder.build()), + applicationResource, + BigtableExporterUtils.getDefaultTaskValue()); + } + + @VisibleForTesting + BigtableCloudMonitoringExporter( + String projectId, + MetricServiceClient client, + @Nullable MonitoredResource applicationResource, + String taskId) { + this.client = client; + this.taskId = taskId; + this.applicationResource = applicationResource; + this.bigtableProjectId = projectId; + } + + @Override + public CompletableResultCode export(Collection collection) { + if (isShutdown.get()) { + logger.log(Level.WARNING, "Exporter is shutting down"); + return CompletableResultCode.ofFailure(); + } + + CompletableResultCode bigtableExportCode = exportBigtableResourceMetrics(collection); + CompletableResultCode applicationExportCode = exportApplicationResourceMetrics(collection); + + lastExportCode = + CompletableResultCode.ofAll(ImmutableList.of(applicationExportCode, bigtableExportCode)); + + return lastExportCode; + } + + /** Export metrics associated with a BigtableTable resource. */ + private CompletableResultCode exportBigtableResourceMetrics(Collection collection) { + // Filter bigtable table metrics + List bigtableMetricData = + collection.stream() + .filter(md -> BIGTABLE_TABLE_METRICS.contains(md.getName())) + .collect(Collectors.toList()); + + // Skips exporting if there's none + if (bigtableMetricData.isEmpty()) { + return CompletableResultCode.ofSuccess(); + } + + // Verifies metrics project id are the same as the bigtable project id set on this client + if (!bigtableMetricData.stream() + .flatMap(metricData -> metricData.getData().getPoints().stream()) + .allMatch(pd -> bigtableProjectId.equals(BigtableExporterUtils.getProjectId(pd)))) { + logger.log(Level.WARNING, "Metric data has different a projectId. Skip exporting."); + return CompletableResultCode.ofFailure(); + } + + List bigtableTimeSeries; + try { + bigtableTimeSeries = + BigtableExporterUtils.convertToBigtableTimeSeries(bigtableMetricData, taskId); + } catch (Throwable e) { + logger.log( + Level.WARNING, + "Failed to convert bigtable table metric data to cloud monitoring timeseries.", + e); + return CompletableResultCode.ofFailure(); + } + + ProjectName projectName = ProjectName.of(bigtableProjectId); + CreateTimeSeriesRequest bigtableRequest = + CreateTimeSeriesRequest.newBuilder() + .setName(projectName.toString()) + .addAllTimeSeries(bigtableTimeSeries) + .build(); + + ApiFuture future = + this.client.createServiceTimeSeriesCallable().futureCall(bigtableRequest); + + CompletableResultCode bigtableExportCode = new CompletableResultCode(); + ApiFutures.addCallback( + future, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable throwable) { + logger.log( + Level.WARNING, + "createServiceTimeSeries request failed for bigtable metrics. ", + throwable); + bigtableExportCode.fail(); + } + + @Override + public void onSuccess(Empty empty) { + bigtableExportCode.succeed(); + } + }, + MoreExecutors.directExecutor()); + + return bigtableExportCode; + } + + /** Export metrics associated with the resource the Application is running on. 
*/ + private CompletableResultCode exportApplicationResourceMetrics( + Collection collection) { + if (applicationResource == null) { + return CompletableResultCode.ofSuccess(); + } + + // Filter application level metrics + List metricData = + collection.stream() + .filter(md -> APPLICATION_METRICS.contains(md.getName())) + .collect(Collectors.toList()); + + // Skip exporting if there's none + if (metricData.isEmpty()) { + return CompletableResultCode.ofSuccess(); + } + + List timeSeries; + try { + timeSeries = + BigtableExporterUtils.convertToApplicationResourceTimeSeries( + metricData, taskId, applicationResource); + } catch (Throwable e) { + logger.log( + Level.WARNING, + "Failed to convert application metric data to cloud monitoring timeseries.", + e); + return CompletableResultCode.ofFailure(); + } + + // Construct the request. The project id will be the project id of the detected monitored + // resource. + ApiFuture gceOrGkeFuture; + CompletableResultCode exportCode = new CompletableResultCode(); + try { + ProjectName projectName = + ProjectName.of(applicationResource.getLabelsOrThrow(APPLICATION_RESOURCE_PROJECT_ID)); + CreateTimeSeriesRequest request = + CreateTimeSeriesRequest.newBuilder() + .setName(projectName.toString()) + .addAllTimeSeries(timeSeries) + .build(); + + gceOrGkeFuture = this.client.createServiceTimeSeriesCallable().futureCall(request); + + ApiFutures.addCallback( + gceOrGkeFuture, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable throwable) { + logger.log( + Level.WARNING, + "createServiceTimeSeries request failed for per connection error metrics.", + throwable); + exportCode.fail(); + } + + @Override + public void onSuccess(Empty empty) { + exportCode.succeed(); + } + }, + MoreExecutors.directExecutor()); + + } catch (Exception e) { + logger.log( + Level.WARNING, + "Failed to get projectName for application resource " + applicationResource); + return CompletableResultCode.ofFailure(); + } + + return exportCode; + } + + @Override + public CompletableResultCode flush() { + if (lastExportCode != null) { + return lastExportCode; + } + return CompletableResultCode.ofSuccess(); + } + + @Override + public CompletableResultCode shutdown() { + if (!isShutdown.compareAndSet(false, true)) { + logger.log(Level.WARNING, "shutdown is called multiple times"); + return CompletableResultCode.ofSuccess(); + } + CompletableResultCode flushResult = flush(); + CompletableResultCode shutdownResult = new CompletableResultCode(); + flushResult.whenComplete( + () -> { + Throwable throwable = null; + try { + client.shutdown(); + } catch (Throwable e) { + logger.log(Level.WARNING, "failed to shutdown the monitoring client", e); + throwable = e; + } + if (throwable != null) { + shutdownResult.fail(); + } else { + shutdownResult.succeed(); + } + }); + return CompletableResultCode.ofAll(Arrays.asList(flushResult, shutdownResult)); + } + + /** + * For Google Cloud Monitoring always return CUMULATIVE to keep track of the cumulative value of a + * metric over time. 
+ */ + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return AggregationTemporality.CUMULATIVE; + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java new file mode 100644 index 0000000000..5bf6688e17 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java @@ -0,0 +1,367 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import static com.google.api.Distribution.BucketOptions; +import static com.google.api.Distribution.BucketOptions.Explicit; +import static com.google.api.MetricDescriptor.MetricKind; +import static com.google.api.MetricDescriptor.MetricKind.CUMULATIVE; +import static com.google.api.MetricDescriptor.MetricKind.GAUGE; +import static com.google.api.MetricDescriptor.MetricKind.UNRECOGNIZED; +import static com.google.api.MetricDescriptor.ValueType; +import static com.google.api.MetricDescriptor.ValueType.DISTRIBUTION; +import static com.google.api.MetricDescriptor.ValueType.DOUBLE; +import static com.google.api.MetricDescriptor.ValueType.INT64; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_UID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY; + +import com.google.api.Distribution; +import com.google.api.Metric; +import com.google.api.MonitoredResource; +import com.google.cloud.opentelemetry.detection.AttributeKeys; +import com.google.cloud.opentelemetry.detection.DetectedPlatform; +import com.google.cloud.opentelemetry.detection.GCPPlatformDetector; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableSet; +import com.google.monitoring.v3.Point; +import com.google.monitoring.v3.TimeInterval; +import com.google.monitoring.v3.TimeSeries; +import com.google.monitoring.v3.TypedValue; +import com.google.protobuf.util.Timestamps; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.DoublePointData; +import 
io.opentelemetry.sdk.metrics.data.HistogramData; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.data.MetricDataType; +import io.opentelemetry.sdk.metrics.data.PointData; +import io.opentelemetry.sdk.metrics.data.SumData; +import java.lang.management.ManagementFactory; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +/** Utils to convert OpenTelemetry types to Google Cloud Monitoring types. */ +class BigtableExporterUtils { + + private static final Logger logger = Logger.getLogger(BigtableExporterUtils.class.getName()); + + private static final String BIGTABLE_RESOURCE_TYPE = "bigtable_client_raw"; + + // These metric labels will be promoted to the bigtable_table monitored resource fields + private static final Set> BIGTABLE_PROMOTED_RESOURCE_LABELS = + ImmutableSet.of( + BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, TABLE_ID_KEY, CLUSTER_ID_KEY, ZONE_ID_KEY); + + private BigtableExporterUtils() {} + + /** + * In most cases this should look like java-${UUID}@${hostname}. The hostname will be retrieved + * from the jvm name and fallback to the local hostname. + */ + static String getDefaultTaskValue() { + // Something like '@' + final String jvmName = ManagementFactory.getRuntimeMXBean().getName(); + // If jvm doesn't have the expected format, fallback to the local hostname + if (jvmName.indexOf('@') < 1) { + String hostname = "localhost"; + try { + hostname = InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + logger.log(Level.INFO, "Unable to get the hostname.", e); + } + // Generate a random number and use the same format "random_number@hostname". 
+ return "java-" + UUID.randomUUID() + "@" + hostname; + } + return "java-" + UUID.randomUUID() + jvmName; + } + + static String getProjectId(PointData pointData) { + return pointData.getAttributes().get(BIGTABLE_PROJECT_ID_KEY); + } + + static List convertToBigtableTimeSeries(List collection, String taskId) { + List allTimeSeries = new ArrayList<>(); + + for (MetricData metricData : collection) { + if (!metricData.getInstrumentationScopeInfo().getName().equals(METER_NAME)) { + // Filter out metric data for instruments that are not part of the bigtable builtin metrics + continue; + } + metricData.getData().getPoints().stream() + .map(pointData -> convertPointToBigtableTimeSeries(metricData, pointData, taskId)) + .forEach(allTimeSeries::add); + } + + return allTimeSeries; + } + + static List convertToApplicationResourceTimeSeries( + Collection collection, String taskId, MonitoredResource applicationResource) { + Preconditions.checkNotNull( + applicationResource, + "convert application metrics is called when the supported resource is not detected"); + List allTimeSeries = new ArrayList<>(); + for (MetricData metricData : collection) { + if (!metricData.getInstrumentationScopeInfo().getName().equals(METER_NAME)) { + // Filter out metric data for instruments that are not part of the bigtable builtin metrics + continue; + } + metricData.getData().getPoints().stream() + .map( + pointData -> + convertPointToApplicationResourceTimeSeries( + metricData, pointData, taskId, applicationResource)) + .forEach(allTimeSeries::add); + } + return allTimeSeries; + } + + @Nullable + static MonitoredResource detectResource() { + GCPPlatformDetector detector = GCPPlatformDetector.DEFAULT_INSTANCE; + DetectedPlatform detectedPlatform = detector.detectPlatform(); + MonitoredResource monitoredResource = null; + try { + switch (detectedPlatform.getSupportedPlatform()) { + case GOOGLE_COMPUTE_ENGINE: + monitoredResource = + createGceMonitoredResource( + detectedPlatform.getProjectId(), detectedPlatform.getAttributes()); + break; + case GOOGLE_KUBERNETES_ENGINE: + monitoredResource = + createGkeMonitoredResource( + detectedPlatform.getProjectId(), detectedPlatform.getAttributes()); + break; + } + } catch (IllegalStateException e) { + logger.log( + Level.WARNING, + "Failed to create monitored resource for " + detectedPlatform.getSupportedPlatform(), + e); + } + return monitoredResource; + } + + private static MonitoredResource createGceMonitoredResource( + String projectId, Map attributes) { + return MonitoredResource.newBuilder() + .setType("gce_instance") + .putLabels("project_id", projectId) + .putLabels("instance_id", getAttribute(attributes, AttributeKeys.GCE_INSTANCE_ID)) + .putLabels("zone", getAttribute(attributes, AttributeKeys.GCE_AVAILABILITY_ZONE)) + .build(); + } + + private static MonitoredResource createGkeMonitoredResource( + String projectId, Map attributes) { + return MonitoredResource.newBuilder() + .setType("k8s_container") + .putLabels("project_id", projectId) + .putLabels("location", getAttribute(attributes, AttributeKeys.GKE_CLUSTER_LOCATION)) + .putLabels("cluster_name", getAttribute(attributes, AttributeKeys.GKE_CLUSTER_NAME)) + .putLabels("namespace_name", MoreObjects.firstNonNull(System.getenv("NAMESPACE"), "")) + .putLabels("pod_name", MoreObjects.firstNonNull(System.getenv("HOSTNAME"), "")) + .putLabels("container_name", MoreObjects.firstNonNull(System.getenv("CONTAINER_NAME"), "")) + .build(); + } + + private static String getAttribute(Map attributes, String key) { + String value = 
attributes.get(key); + if (value == null) { + throw new IllegalStateException( + "Required attribute " + key + " does not exist in the attributes map " + attributes); + } + return value; + } + + private static TimeSeries convertPointToBigtableTimeSeries( + MetricData metricData, PointData pointData, String taskId) { + TimeSeries.Builder builder = + TimeSeries.newBuilder() + .setMetricKind(convertMetricKind(metricData)) + .setValueType(convertValueType(metricData.getType())); + Metric.Builder metricBuilder = Metric.newBuilder().setType(metricData.getName()); + + Attributes attributes = pointData.getAttributes(); + MonitoredResource.Builder monitoredResourceBuilder = + MonitoredResource.newBuilder().setType(BIGTABLE_RESOURCE_TYPE); + + for (AttributeKey key : attributes.asMap().keySet()) { + if (BIGTABLE_PROMOTED_RESOURCE_LABELS.contains(key)) { + monitoredResourceBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key))); + } else { + metricBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key))); + } + } + + builder.setResource(monitoredResourceBuilder.build()); + + metricBuilder.putLabels(CLIENT_UID_KEY.getKey(), taskId); + builder.setMetric(metricBuilder.build()); + + TimeInterval timeInterval = + TimeInterval.newBuilder() + .setStartTime(Timestamps.fromNanos(pointData.getStartEpochNanos())) + .setEndTime(Timestamps.fromNanos(pointData.getEpochNanos())) + .build(); + + builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval)); + + return builder.build(); + } + + private static TimeSeries convertPointToApplicationResourceTimeSeries( + MetricData metricData, + PointData pointData, + String taskId, + MonitoredResource applicationResource) { + TimeSeries.Builder builder = + TimeSeries.newBuilder() + .setMetricKind(convertMetricKind(metricData)) + .setValueType(convertValueType(metricData.getType())) + .setResource(applicationResource); + + Metric.Builder metricBuilder = Metric.newBuilder().setType(metricData.getName()); + + Attributes attributes = pointData.getAttributes(); + for (AttributeKey key : attributes.asMap().keySet()) { + metricBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key))); + } + + metricBuilder.putLabels(CLIENT_UID_KEY.getKey(), taskId); + builder.setMetric(metricBuilder.build()); + + TimeInterval timeInterval = + TimeInterval.newBuilder() + .setStartTime(Timestamps.fromNanos(pointData.getStartEpochNanos())) + .setEndTime(Timestamps.fromNanos(pointData.getEpochNanos())) + .build(); + + builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval)); + return builder.build(); + } + + private static MetricKind convertMetricKind(MetricData metricData) { + switch (metricData.getType()) { + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return convertHistogramType(metricData.getHistogramData()); + case LONG_GAUGE: + case DOUBLE_GAUGE: + return GAUGE; + case LONG_SUM: + return convertSumDataType(metricData.getLongSumData()); + case DOUBLE_SUM: + return convertSumDataType(metricData.getDoubleSumData()); + default: + return UNRECOGNIZED; + } + } + + private static MetricKind convertHistogramType(HistogramData histogramData) { + if (histogramData.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) { + return CUMULATIVE; + } + return UNRECOGNIZED; + } + + private static MetricKind convertSumDataType(SumData sum) { + if (!sum.isMonotonic()) { + return GAUGE; + } + if (sum.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) { + return CUMULATIVE; + } + return UNRECOGNIZED; + } + + 
private static ValueType convertValueType(MetricDataType metricDataType) { + switch (metricDataType) { + case LONG_GAUGE: + case LONG_SUM: + return INT64; + case DOUBLE_GAUGE: + case DOUBLE_SUM: + return DOUBLE; + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return DISTRIBUTION; + default: + return ValueType.UNRECOGNIZED; + } + } + + private static Point createPoint( + MetricDataType type, PointData pointData, TimeInterval timeInterval) { + Point.Builder builder = Point.newBuilder().setInterval(timeInterval); + switch (type) { + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return builder + .setValue( + TypedValue.newBuilder() + .setDistributionValue(convertHistogramData((HistogramPointData) pointData)) + .build()) + .build(); + case DOUBLE_GAUGE: + case DOUBLE_SUM: + return builder + .setValue( + TypedValue.newBuilder() + .setDoubleValue(((DoublePointData) pointData).getValue()) + .build()) + .build(); + case LONG_GAUGE: + case LONG_SUM: + return builder + .setValue(TypedValue.newBuilder().setInt64Value(((LongPointData) pointData).getValue())) + .build(); + default: + logger.log(Level.WARNING, "unsupported metric type"); + return builder.build(); + } + } + + private static Distribution convertHistogramData(HistogramPointData pointData) { + return Distribution.newBuilder() + .setCount(pointData.getCount()) + .setMean(pointData.getCount() == 0L ? 0.0D : pointData.getSum() / pointData.getCount()) + .setBucketOptions( + BucketOptions.newBuilder() + .setExplicitBuckets(Explicit.newBuilder().addAllBounds(pointData.getBoundaries()))) + .addAllBucketCounts(pointData.getCounts()) + .build(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java index 1cda49934c..3b2242385a 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java @@ -42,7 +42,7 @@ public void streamCreated(Attributes transportAttrs, Metadata headers) { @Override public void outboundMessageSent(int seqNo, long optionalWireSize, long optionalUncompressedSize) { - tracer.grpcChannelQueuedLatencies(stopwatch.elapsed(TimeUnit.MILLISECONDS)); + tracer.grpcChannelQueuedLatencies(stopwatch.elapsed(TimeUnit.NANOSECONDS)); } static class Factory extends ClientStreamTracer.Factory { diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java new file mode 100644 index 0000000000..d85300828b --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java @@ -0,0 +1,220 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.api.core.InternalApi; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.sdk.metrics.Aggregation; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.View; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +/** Defining Bigtable builit-in metrics scope, attributes, metric names and views. */ +@InternalApi +public class BuiltinMetricsConstants { + + // Metric attribute keys for monitored resource + public static final AttributeKey BIGTABLE_PROJECT_ID_KEY = + AttributeKey.stringKey("project_id"); + public static final AttributeKey INSTANCE_ID_KEY = AttributeKey.stringKey("instance"); + public static final AttributeKey TABLE_ID_KEY = AttributeKey.stringKey("table"); + public static final AttributeKey CLUSTER_ID_KEY = AttributeKey.stringKey("cluster"); + public static final AttributeKey ZONE_ID_KEY = AttributeKey.stringKey("zone"); + + // Metric attribute keys for labels + // We need to access APP_PROFILE_KEY in EnhancedBigtableStubSettings and STREAMING_KEY in + // IT tests, so they're public. + public static final AttributeKey APP_PROFILE_KEY = AttributeKey.stringKey("app_profile"); + public static final AttributeKey STREAMING_KEY = AttributeKey.booleanKey("streaming"); + public static final AttributeKey CLIENT_NAME_KEY = AttributeKey.stringKey("client_name"); + static final AttributeKey METHOD_KEY = AttributeKey.stringKey("method"); + static final AttributeKey STATUS_KEY = AttributeKey.stringKey("status"); + static final AttributeKey CLIENT_UID_KEY = AttributeKey.stringKey("client_uid"); + + // Metric names + public static final String OPERATION_LATENCIES_NAME = "operation_latencies"; + public static final String ATTEMPT_LATENCIES_NAME = "attempt_latencies"; + static final String RETRY_COUNT_NAME = "retry_count"; + static final String CONNECTIVITY_ERROR_COUNT_NAME = "connectivity_error_count"; + static final String SERVER_LATENCIES_NAME = "server_latencies"; + static final String FIRST_RESPONSE_LATENCIES_NAME = "first_response_latencies"; + static final String APPLICATION_BLOCKING_LATENCIES_NAME = "application_latencies"; + static final String CLIENT_BLOCKING_LATENCIES_NAME = "throttling_latencies"; + static final String PER_CONNECTION_ERROR_COUNT_NAME = "per_connection_error_count"; + + // Buckets under 100,000 are identical to buckets for server side metrics handler_latencies. + // Extending client side bucket to up to 3,200,000. 
+ private static final Aggregation AGGREGATION_WITH_MILLIS_HISTOGRAM = + Aggregation.explicitBucketHistogram( + ImmutableList.of( + 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0, 30.0, 40.0, + 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, 250.0, 300.0, 400.0, 500.0, 650.0, + 800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, 50000.0, 100000.0, 200000.0, + 400000.0, 800000.0, 1600000.0, 3200000.0)); // max is 53.3 minutes + + private static final Aggregation AGGREGATION_PER_CONNECTION_ERROR_COUNT_HISTOGRAM = + Aggregation.explicitBucketHistogram( + ImmutableList.of( + 1.0, + 2.0, + 4.0, + 8.0, + 16.0, + 32.0, + 64.0, + 125.0, + 250.0, + 500.0, + 1_000.0, + 2_000.0, + 4_000.0, + 8_000.0, + 16_000.0, + 32_000.0, + 64_000.0, + 128_000.0, + 250_000.0, + 500_000.0, + 1_000_000.0)); + + public static final String METER_NAME = "bigtable.googleapis.com/internal/client/"; + + static final Set COMMON_ATTRIBUTES = + ImmutableSet.of( + BIGTABLE_PROJECT_ID_KEY, + INSTANCE_ID_KEY, + TABLE_ID_KEY, + APP_PROFILE_KEY, + CLUSTER_ID_KEY, + ZONE_ID_KEY, + METHOD_KEY, + CLIENT_NAME_KEY); + + static void defineView( + ImmutableMap.Builder viewMap, + String id, + Aggregation aggregation, + InstrumentType type, + String unit, + Set attributes) { + InstrumentSelector selector = + InstrumentSelector.builder() + .setName(id) + .setMeterName(METER_NAME) + .setType(type) + .setUnit(unit) + .build(); + Set attributesFilter = + ImmutableSet.builder() + .addAll( + COMMON_ATTRIBUTES.stream().map(AttributeKey::getKey).collect(Collectors.toSet())) + .addAll(attributes.stream().map(AttributeKey::getKey).collect(Collectors.toSet())) + .build(); + View view = + View.builder() + .setName(METER_NAME + id) + .setAggregation(aggregation) + .setAttributeFilter(attributesFilter) + .build(); + + viewMap.put(selector, view); + } + + public static Map getAllViews() { + ImmutableMap.Builder views = ImmutableMap.builder(); + + defineView( + views, + OPERATION_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder() + .addAll(COMMON_ATTRIBUTES) + .add(STREAMING_KEY, STATUS_KEY) + .build()); + defineView( + views, + ATTEMPT_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder() + .addAll(COMMON_ATTRIBUTES) + .add(STREAMING_KEY, STATUS_KEY) + .build()); + defineView( + views, + SERVER_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); + defineView( + views, + FIRST_RESPONSE_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); + defineView( + views, + APPLICATION_BLOCKING_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).build()); + defineView( + views, + CLIENT_BLOCKING_LATENCIES_NAME, + AGGREGATION_WITH_MILLIS_HISTOGRAM, + InstrumentType.HISTOGRAM, + "ms", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).build()); + defineView( + views, + RETRY_COUNT_NAME, + Aggregation.sum(), + InstrumentType.COUNTER, + "1", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); + defineView( + views, + CONNECTIVITY_ERROR_COUNT_NAME, + Aggregation.sum(), + InstrumentType.COUNTER, + "1", + ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); + + defineView( + views, 
+ PER_CONNECTION_ERROR_COUNT_NAME, + AGGREGATION_PER_CONNECTION_ERROR_COUNT_HISTOGRAM, + InstrumentType.HISTOGRAM, + "1", + ImmutableSet.builder() + .add(BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, APP_PROFILE_KEY, CLIENT_NAME_KEY) + .build()); + + return views.build(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java index 2d8262a93e..abd214d760 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java @@ -16,13 +16,22 @@ package com.google.cloud.bigtable.data.v2.stub.metrics; import static com.google.api.gax.tracing.ApiTracerFactory.OperationType; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METHOD_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STATUS_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STREAMING_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY; import com.google.api.gax.retrying.ServerStreamingAttemptException; import com.google.api.gax.tracing.SpanName; -import com.google.cloud.bigtable.stats.StatsRecorderWrapper; -import com.google.common.annotations.VisibleForTesting; +import com.google.cloud.bigtable.Version; import com.google.common.base.Stopwatch; import com.google.common.math.IntMath; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.LongCounter; import java.util.concurrent.CancellationException; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -37,8 +46,7 @@ */ class BuiltinMetricsTracer extends BigtableTracer { - private final StatsRecorderWrapper recorder; - + private static final String NAME = "java-bigtable/" + Version.VERSION; private final OperationType operationType; private final SpanName spanName; @@ -64,21 +72,56 @@ class BuiltinMetricsTracer extends BigtableTracer { private boolean flowControlIsDisabled = false; - private AtomicInteger requestLeft = new AtomicInteger(0); + private final AtomicInteger requestLeft = new AtomicInteger(0); // Monitored resource labels private String tableId = "unspecified"; private String zone = "global"; private String cluster = "unspecified"; - private AtomicLong totalClientBlockingTime = new AtomicLong(0); + private final AtomicLong totalClientBlockingTime = new AtomicLong(0); + + private final Attributes baseAttributes; + + private Long serverLatencies = null; + + // OpenCensus (and server) histogram buckets use [start, end), however OpenTelemetry uses (start, + // end]. To work around this, we measure all the latencies in nanoseconds and convert them + // to milliseconds and use DoubleHistogram. This should minimize the chance of a data + // point fall on the bucket boundary that causes off by one errors. 
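+  // For example, an attempt measured as 1_234_567 ns is recorded as 1.234567 ms;
+  // truncating it to a long value of 1 ms would land exactly on the 1.0 bucket
+  // boundary, where the two bucketing conventions disagree.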
+ private final DoubleHistogram operationLatenciesHistogram; + private final DoubleHistogram attemptLatenciesHistogram; + private final DoubleHistogram serverLatenciesHistogram; + private final DoubleHistogram firstResponseLatenciesHistogram; + private final DoubleHistogram clientBlockingLatenciesHistogram; + private final DoubleHistogram applicationBlockingLatenciesHistogram; + private final LongCounter connectivityErrorCounter; + private final LongCounter retryCounter; - @VisibleForTesting BuiltinMetricsTracer( - OperationType operationType, SpanName spanName, StatsRecorderWrapper recorder) { + OperationType operationType, + SpanName spanName, + Attributes attributes, + DoubleHistogram operationLatenciesHistogram, + DoubleHistogram attemptLatenciesHistogram, + DoubleHistogram serverLatenciesHistogram, + DoubleHistogram firstResponseLatenciesHistogram, + DoubleHistogram clientBlockingLatenciesHistogram, + DoubleHistogram applicationBlockingLatenciesHistogram, + LongCounter connectivityErrorCounter, + LongCounter retryCounter) { this.operationType = operationType; this.spanName = spanName; - this.recorder = recorder; + this.baseAttributes = attributes; + + this.operationLatenciesHistogram = operationLatenciesHistogram; + this.attemptLatenciesHistogram = attemptLatenciesHistogram; + this.serverLatenciesHistogram = serverLatenciesHistogram; + this.firstResponseLatenciesHistogram = firstResponseLatenciesHistogram; + this.clientBlockingLatenciesHistogram = clientBlockingLatenciesHistogram; + this.applicationBlockingLatenciesHistogram = applicationBlockingLatenciesHistogram; + this.connectivityErrorCounter = connectivityErrorCounter; + this.retryCounter = retryCounter; } @Override @@ -203,13 +246,8 @@ public int getAttempt() { @Override public void recordGfeMetadata(@Nullable Long latency, @Nullable Throwable throwable) { - // Record the metrics and put in the map after the attempt is done, so we can have cluster and - // zone information if (latency != null) { - recorder.putGfeLatencies(latency); - recorder.putGfeMissingHeaders(0); - } else { - recorder.putGfeMissingHeaders(1); + serverLatencies = latency; } } @@ -220,13 +258,13 @@ public void setLocations(String zone, String cluster) { } @Override - public void batchRequestThrottled(long throttledTimeMs) { - totalClientBlockingTime.addAndGet(throttledTimeMs); + public void batchRequestThrottled(long throttledTimeNanos) { + totalClientBlockingTime.addAndGet(Duration.ofNanos(throttledTimeNanos).toMillis()); } @Override - public void grpcChannelQueuedLatencies(long queuedTimeMs) { - totalClientBlockingTime.addAndGet(queuedTimeMs); + public void grpcChannelQueuedLatencies(long queuedTimeNanos) { + totalClientBlockingTime.addAndGet(queuedTimeNanos); } @Override @@ -239,26 +277,43 @@ private void recordOperationCompletion(@Nullable Throwable status) { return; } operationTimer.stop(); - long operationLatency = operationTimer.elapsed(TimeUnit.MILLISECONDS); + + boolean isStreaming = operationType == OperationType.ServerStreaming; + String statusStr = Util.extractStatus(status); + + // Publish metric data with all the attributes. The attributes get filtered in + // BuiltinMetricsConstants when we construct the views. 
+ Attributes attributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, tableId) + .put(CLUSTER_ID_KEY, cluster) + .put(ZONE_ID_KEY, zone) + .put(METHOD_KEY, spanName.toString()) + .put(CLIENT_NAME_KEY, NAME) + .put(STREAMING_KEY, isStreaming) + .put(STATUS_KEY, statusStr) + .build(); + long operationLatencyNano = operationTimer.elapsed(TimeUnit.NANOSECONDS); // Only record when retry count is greater than 0 so the retry // graph will be less confusing if (attemptCount > 1) { - recorder.putRetryCount(attemptCount - 1); + retryCounter.add(attemptCount - 1, attributes); } + operationLatenciesHistogram.record(convertToMs(operationLatencyNano), attributes); + // serverLatencyTimer should already be stopped in recordAttemptCompletion - recorder.putOperationLatencies(operationLatency); - recorder.putApplicationLatencies( - Duration.ofNanos(operationLatencyNano - totalServerLatencyNano.get()).toMillis()); + long applicationLatencyNano = operationLatencyNano - totalServerLatencyNano.get(); + applicationBlockingLatenciesHistogram.record(convertToMs(applicationLatencyNano), attributes); if (operationType == OperationType.ServerStreaming && spanName.getMethodName().equals("ReadRows")) { - recorder.putFirstResponseLatencies(firstResponsePerOpTimer.elapsed(TimeUnit.MILLISECONDS)); + firstResponseLatenciesHistogram.record( + convertToMs(firstResponsePerOpTimer.elapsed(TimeUnit.NANOSECONDS)), attributes); } - - recorder.recordOperation(Util.extractStatus(status), tableId, zone, cluster); } private void recordAttemptCompletion(@Nullable Throwable status) { @@ -273,8 +328,7 @@ private void recordAttemptCompletion(@Nullable Throwable status) { } } - // Make sure to reset the blocking time after recording it for the next attempt - recorder.putClientBlockingLatencies(totalClientBlockingTime.getAndSet(0)); + boolean isStreaming = operationType == OperationType.ServerStreaming; // Patch the status until it's fixed in gax. When an attempt failed, // it'll throw a ServerStreamingAttemptException. 
Unwrap the exception @@ -283,7 +337,35 @@ private void recordAttemptCompletion(@Nullable Throwable status) { status = status.getCause(); } - recorder.putAttemptLatencies(attemptTimer.elapsed(TimeUnit.MILLISECONDS)); - recorder.recordAttempt(Util.extractStatus(status), tableId, zone, cluster); + String statusStr = Util.extractStatus(status); + + Attributes attributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, tableId) + .put(CLUSTER_ID_KEY, cluster) + .put(ZONE_ID_KEY, zone) + .put(METHOD_KEY, spanName.toString()) + .put(CLIENT_NAME_KEY, NAME) + .put(STREAMING_KEY, isStreaming) + .put(STATUS_KEY, statusStr) + .build(); + + clientBlockingLatenciesHistogram.record(convertToMs(totalClientBlockingTime.get()), attributes); + + attemptLatenciesHistogram.record( + convertToMs(attemptTimer.elapsed(TimeUnit.NANOSECONDS)), attributes); + + if (serverLatencies != null) { + serverLatenciesHistogram.record(serverLatencies, attributes); + connectivityErrorCounter.add(0, attributes); + } else { + connectivityErrorCounter.add(1, attributes); + } + } + + private static double convertToMs(long nanoSeconds) { + double toMs = 1e-6; + return nanoSeconds * toMs; } } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java index 794997071d..f0ac656978 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java @@ -15,29 +15,112 @@ */ package com.google.cloud.bigtable.data.v2.stub.metrics; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.FIRST_RESPONSE_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME; + import com.google.api.core.InternalApi; import com.google.api.gax.tracing.ApiTracer; import com.google.api.gax.tracing.ApiTracerFactory; import com.google.api.gax.tracing.BaseApiTracerFactory; import com.google.api.gax.tracing.SpanName; -import com.google.cloud.bigtable.stats.StatsWrapper; -import com.google.common.collect.ImmutableMap; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.Meter; +import java.io.IOException; /** - * {@link ApiTracerFactory} that will generate OpenCensus metrics by using the {@link ApiTracer} + * {@link ApiTracerFactory} that will generate 
OpenTelemetry metrics by using the {@link ApiTracer} * api. */ @InternalApi("For internal use only") public class BuiltinMetricsTracerFactory extends BaseApiTracerFactory { - private final ImmutableMap statsAttributes; + private final Attributes attributes; + + private static final String MILLISECOND = "ms"; + private static final String COUNT = "1"; - public static BuiltinMetricsTracerFactory create(ImmutableMap statsAttributes) { - return new BuiltinMetricsTracerFactory(statsAttributes); + private final DoubleHistogram operationLatenciesHistogram; + private final DoubleHistogram attemptLatenciesHistogram; + private final DoubleHistogram serverLatenciesHistogram; + private final DoubleHistogram firstResponseLatenciesHistogram; + private final DoubleHistogram clientBlockingLatenciesHistogram; + private final DoubleHistogram applicationBlockingLatenciesHistogram; + private final LongCounter connectivityErrorCounter; + private final LongCounter retryCounter; + + public static BuiltinMetricsTracerFactory create( + OpenTelemetry openTelemetry, Attributes attributes) throws IOException { + return new BuiltinMetricsTracerFactory(openTelemetry, attributes); } - private BuiltinMetricsTracerFactory(ImmutableMap statsAttributes) { - this.statsAttributes = statsAttributes; + BuiltinMetricsTracerFactory(OpenTelemetry openTelemetry, Attributes attributes) { + this.attributes = attributes; + Meter meter = openTelemetry.getMeter(METER_NAME); + + operationLatenciesHistogram = + meter + .histogramBuilder(OPERATION_LATENCIES_NAME) + .setDescription( + "Total time until final operation success or failure, including retries and backoff.") + .setUnit(MILLISECOND) + .build(); + attemptLatenciesHistogram = + meter + .histogramBuilder(ATTEMPT_LATENCIES_NAME) + .setDescription("Client observed latency per RPC attempt.") + .setUnit(MILLISECOND) + .build(); + serverLatenciesHistogram = + meter + .histogramBuilder(SERVER_LATENCIES_NAME) + .setDescription( + "The latency measured from the moment that the RPC entered the Google data center until the RPC was completed.") + .setUnit(MILLISECOND) + .build(); + firstResponseLatenciesHistogram = + meter + .histogramBuilder(FIRST_RESPONSE_LATENCIES_NAME) + .setDescription( + "Latency from operation start until the response headers were received. The publishing of the measurement will be delayed until the attempt response has been received.") + .setUnit(MILLISECOND) + .build(); + clientBlockingLatenciesHistogram = + meter + .histogramBuilder(CLIENT_BLOCKING_LATENCIES_NAME) + .setDescription( + "The artificial latency introduced by the client to limit the number of outstanding requests. The publishing of the measurement will be delayed until the attempt trailers have been received.") + .setUnit(MILLISECOND) + .build(); + applicationBlockingLatenciesHistogram = + meter + .histogramBuilder(APPLICATION_BLOCKING_LATENCIES_NAME) + .setDescription( + "The latency of the client application consuming available response data.") + .setUnit(MILLISECOND) + .build(); + connectivityErrorCounter = + meter + .counterBuilder(CONNECTIVITY_ERROR_COUNT_NAME) + .setDescription( + "Number of requests that failed to reach the Google datacenter. 
(Requests without google response headers") + .setUnit(COUNT) + .build(); + retryCounter = + meter + .counterBuilder(RETRY_COUNT_NAME) + .setDescription("The number of additional RPCs sent after the initial attempt.") + .setUnit(COUNT) + .build(); } @Override @@ -45,6 +128,14 @@ public ApiTracer newTracer(ApiTracer parent, SpanName spanName, OperationType op return new BuiltinMetricsTracer( operationType, spanName, - StatsWrapper.createRecorder(operationType, spanName, statsAttributes)); + attributes, + operationLatenciesHistogram, + attemptLatenciesHistogram, + serverLatenciesHistogram, + firstResponseLatenciesHistogram, + clientBlockingLatenciesHistogram, + applicationBlockingLatenciesHistogram, + connectivityErrorCounter, + retryCounter); } } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java new file mode 100644 index 0000000000..445160a146 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java @@ -0,0 +1,59 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.auth.Credentials; +import com.google.auth.oauth2.GoogleCredentials; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.View; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import java.io.IOException; +import java.util.Map; +import javax.annotation.Nullable; + +/** + * A util class to register built-in metrics on a custom OpenTelemetry instance. This is for + * advanced usage, and is only necessary when wanting to write built-in metrics to cloud monitoring + * and custom sinks. Please refer to {@link CustomOpenTelemetryMetricsProvider} for example usage. + */ +public class BuiltinMetricsView { + + private BuiltinMetricsView() {} + + /** + * Register built-in metrics on the {@link SdkMeterProviderBuilder} with application default + * credentials. + */ + public static void registerBuiltinMetrics(String projectId, SdkMeterProviderBuilder builder) + throws IOException { + BuiltinMetricsView.registerBuiltinMetrics( + projectId, GoogleCredentials.getApplicationDefault(), builder); + } + + /** Register built-in metrics on the {@link SdkMeterProviderBuilder} with credentials. 
*/ + public static void registerBuiltinMetrics( + String projectId, @Nullable Credentials credentials, SdkMeterProviderBuilder builder) + throws IOException { + MetricExporter metricExporter = BigtableCloudMonitoringExporter.create(projectId, credentials); + for (Map.Entry entry : + BuiltinMetricsConstants.getAllViews().entrySet()) { + builder.registerView(entry.getKey(), entry.getValue()); + } + builder.registerMetricReader(PeriodicMetricReader.create(metricExporter)); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java new file mode 100644 index 0000000000..8c1c5c1c90 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java @@ -0,0 +1,70 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.common.base.MoreObjects; +import io.opentelemetry.api.OpenTelemetry; + +/** + * Set a custom OpenTelemetry instance. + * + *

+ * <p>To register client side metrics on the custom OpenTelemetry instance:
+ *
+ * <pre>{@code
+ * SdkMeterProviderBuilder sdkMeterProvider = SdkMeterProvider.builder();
+ *
+ * // register Builtin metrics on your meter provider with default credentials
+ * BuiltinMetricsView.registerBuiltinMetrics("project-id", sdkMeterProvider);
+ *
+ * // register other metrics reader and views
+ * sdkMeterProvider.registerMetricReader(..);
+ * sdkMeterProvider.registerView(..);
+ *
+ * // create the OTEL instance
+ * OpenTelemetry openTelemetry = OpenTelemetrySdk
+ *     .builder()
+ *     .setMeterProvider(sdkMeterProvider.build())
+ *     .build();
+ *
+ * // Override MetricsProvider in BigtableDataSettings
+ * BigtableDataSettings settings = BigtableDataSettings.newBuilder()
+ *   .setProjectId("my-project")
+ *   .setInstanceId("my-instance-id")
+ *   .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry))
+ *   .build();
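+ *
+ * // illustrative continuation: create the data client from these settings
+ * BigtableDataClient client = BigtableDataClient.create(settings);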
+ * }</pre>
+ */ +public final class CustomOpenTelemetryMetricsProvider implements MetricsProvider { + + private final OpenTelemetry otel; + + public static CustomOpenTelemetryMetricsProvider create(OpenTelemetry otel) { + return new CustomOpenTelemetryMetricsProvider(otel); + } + + private CustomOpenTelemetryMetricsProvider(OpenTelemetry otel) { + this.otel = otel; + } + + public OpenTelemetry getOpenTelemetry() { + return otel; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).add("openTelemetry", otel).toString(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java new file mode 100644 index 0000000000..b8aad8c931 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java @@ -0,0 +1,63 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.api.core.InternalApi; +import com.google.auth.Credentials; +import com.google.common.base.MoreObjects; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import java.io.IOException; +import javax.annotation.Nullable; + +/** + * Set {@link + * com.google.cloud.bigtable.data.v2.BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)}, + * to {@link this#INSTANCE} to enable collecting and export client side metrics + * https://cloud.google.com/bigtable/docs/client-side-metrics. This is the default setting in {@link + * com.google.cloud.bigtable.data.v2.BigtableDataSettings}. 
+ */ +public final class DefaultMetricsProvider implements MetricsProvider { + + public static DefaultMetricsProvider INSTANCE = new DefaultMetricsProvider(); + + private OpenTelemetry openTelemetry; + private String projectId; + + private DefaultMetricsProvider() {} + + @InternalApi + public OpenTelemetry getOpenTelemetry(String projectId, @Nullable Credentials credentials) + throws IOException { + this.projectId = projectId; + if (openTelemetry == null) { + SdkMeterProviderBuilder meterProvider = SdkMeterProvider.builder(); + BuiltinMetricsView.registerBuiltinMetrics(projectId, credentials, meterProvider); + openTelemetry = OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + } + return openTelemetry; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("projectId", projectId) + .add("openTelemetry", openTelemetry) + .toString(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java index cab3b0bbd0..a891df9509 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionMetricTracker.java @@ -15,12 +15,15 @@ */ package com.google.cloud.bigtable.data.v2.stub.metrics; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME; + import com.google.api.core.InternalApi; -import com.google.cloud.bigtable.stats.StatsRecorderWrapperForConnection; -import com.google.cloud.bigtable.stats.StatsWrapper; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableMap; import io.grpc.ClientInterceptor; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongHistogram; +import io.opentelemetry.api.metrics.Meter; import java.util.Collections; import java.util.Set; import java.util.WeakHashMap; @@ -30,24 +33,30 @@ /* Background task that goes through all connections and updates the errors_per_connection metric. */ @InternalApi("For internal use only") public class ErrorCountPerConnectionMetricTracker implements Runnable { + private static final Integer PER_CONNECTION_ERROR_COUNT_PERIOD_SECONDS = 60; + + private final LongHistogram perConnectionErrorCountHistogram; + private final Attributes attributes; + private final Set connectionErrorCountInterceptors; private final Object interceptorsLock = new Object(); - // This is not final so that it can be updated and mocked during testing. 
- private StatsRecorderWrapperForConnection statsRecorderWrapperForConnection; - @VisibleForTesting - void setStatsRecorderWrapperForConnection( - StatsRecorderWrapperForConnection statsRecorderWrapperForConnection) { - this.statsRecorderWrapperForConnection = statsRecorderWrapperForConnection; - } - - public ErrorCountPerConnectionMetricTracker(ImmutableMap builtinAttributes) { + public ErrorCountPerConnectionMetricTracker(OpenTelemetry openTelemetry, Attributes attributes) { connectionErrorCountInterceptors = Collections.synchronizedSet(Collections.newSetFromMap(new WeakHashMap<>())); - this.statsRecorderWrapperForConnection = - StatsWrapper.createRecorderForConnection(builtinAttributes); + Meter meter = openTelemetry.getMeter(METER_NAME); + + perConnectionErrorCountHistogram = + meter + .histogramBuilder(PER_CONNECTION_ERROR_COUNT_NAME) + .ofLongs() + .setDescription("Distribution of counts of channels per 'error count per minute'.") + .setUnit("1") + .build(); + + this.attributes = attributes; } public void startConnectionErrorCountTracker(ScheduledExecutorService scheduler) { @@ -75,7 +84,7 @@ public void run() { if (errors > 0 || successes > 0) { // TODO: add a metric to also keep track of the number of successful requests per each // connection. - statsRecorderWrapperForConnection.putAndRecordPerConnectionErrorCount(errors); + perConnectionErrorCountHistogram.record(errors, attributes); } } } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java new file mode 100644 index 0000000000..251bb41619 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsProvider.java @@ -0,0 +1,25 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.api.core.InternalExtensionOnly; + +/** + * Provide client side metrics https://cloud.google.com/bigtable/docs/client-side-metrics + * implementations. + */ +@InternalExtensionOnly +public interface MetricsProvider {} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java new file mode 100644 index 0000000000..9a00ddb135 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java @@ -0,0 +1,36 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import com.google.common.base.MoreObjects; + +/** + * Set {@link + * com.google.cloud.bigtable.data.v2.BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)}, + * to {@link this#INSTANCE} to disable collecting and export client side metrics + * https://cloud.google.com/bigtable/docs/client-side-metrics. + */ +public final class NoopMetricsProvider implements MetricsProvider { + + public static NoopMetricsProvider INSTANCE = new NoopMetricsProvider(); + + private NoopMetricsProvider() {} + + @Override + public String toString() { + return MoreObjects.toStringHelper(this).toString(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java index b7140f0156..ce73d75dc1 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java @@ -21,6 +21,7 @@ import com.google.api.gax.rpc.ApiCallContext; import com.google.api.gax.rpc.UnaryCallable; import com.google.api.gax.tracing.ApiTracer; +import org.threeten.bp.Duration; /** * This callable will extract total throttled time from {@link ApiCallContext} and add it to {@link @@ -42,7 +43,8 @@ public ApiFuture futureCall(RequestT request, ApiCallContext context) // this should always be true if (tracer instanceof BigtableTracer) { ((BigtableTracer) tracer) - .batchRequestThrottled(context.getOption(Batcher.THROTTLED_TIME_KEY)); + .batchRequestThrottled( + Duration.ofMillis(context.getOption(Batcher.THROTTLED_TIME_KEY)).toNanos()); } } return innerCallable.futureCall(request, context); diff --git a/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json b/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json index 931ac73adc..95c5bab9e5 100644 --- a/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json +++ b/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json @@ -395,6 +395,33 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.bigtable.admin.v2.AppProfile$DataBoostIsolationReadOnly", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.bigtable.admin.v2.AppProfile$DataBoostIsolationReadOnly$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": 
"com.google.bigtable.admin.v2.AppProfile$DataBoostIsolationReadOnly$ComputeBillingOwner", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.bigtable.admin.v2.AppProfile$MultiClusterRoutingUseAny", "queryAllDeclaredConstructors": true, @@ -1052,6 +1079,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.bigtable.admin.v2.DataBoostReadLocalWrites", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.bigtable.admin.v2.DataBoostReadLocalWrites$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.bigtable.admin.v2.DeleteAppProfileRequest", "queryAllDeclaredConstructors": true, @@ -2033,6 +2078,24 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.bigtable.admin.v2.StandardReadRemoteWrites", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.bigtable.admin.v2.StandardReadRemoteWrites$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.bigtable.admin.v2.StorageType", "queryAllDeclaredConstructors": true, diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java index a35112b380..fea66e82bf 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactoryTest.java @@ -36,6 +36,7 @@ import com.google.bigtable.v2.ReadRowsResponse; import com.google.cloud.bigtable.data.v2.internal.NameUtil; import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider; import com.google.common.base.Preconditions; import com.google.common.io.BaseEncoding; import io.grpc.Attributes; @@ -169,10 +170,13 @@ public void tearDown() { @Test public void testNewClientsShareTransportChannel() throws Exception { - // Create 3 lightweight clients - - try (BigtableDataClientFactory factory = BigtableDataClientFactory.create(defaultSettings); + try (BigtableDataClientFactory factory = + BigtableDataClientFactory.create( + defaultSettings + .toBuilder() + .setMetricsProvider(NoopMetricsProvider.INSTANCE) + .build()); BigtableDataClient ignored1 = factory.createForInstance("project1", "instance1"); BigtableDataClient ignored2 = factory.createForInstance("project2", "instance2"); BigtableDataClient ignored3 = factory.createForInstance("project3", "instance3")) { @@ -316,7 +320,7 @@ public void testFeatureFlags() throws Exception { @Test public void testBulkMutationFlowControllerConfigured() throws 
Exception { BigtableDataSettings settings = - BigtableDataSettings.newBuilder() + BigtableDataSettings.newBuilderForEmulator(server.getPort()) .setProjectId("my-project") .setInstanceId("my-instance") .setCredentialsProvider(credentialsProvider) diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java index 4e75fb8631..56181a20ab 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java @@ -15,34 +15,64 @@ */ package com.google.cloud.bigtable.data.v2.it; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getAggregatedValue; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getMetricData; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getStartTimeSeconds; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.verifyAttributes; +import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.Truth.assertWithMessage; import static com.google.common.truth.TruthJUnit.assume; import com.google.api.client.util.Lists; +import com.google.cloud.bigtable.admin.v2.BigtableInstanceAdminClient; import com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient; +import com.google.cloud.bigtable.admin.v2.models.AppProfile; +import com.google.cloud.bigtable.admin.v2.models.CreateAppProfileRequest; import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest; import com.google.cloud.bigtable.admin.v2.models.Table; +import com.google.cloud.bigtable.data.v2.BigtableDataClient; import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.models.Query; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView; +import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider; import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv; import com.google.cloud.bigtable.test_helpers.env.PrefixGenerator; import com.google.cloud.bigtable.test_helpers.env.TestEnvRule; import com.google.cloud.monitoring.v3.MetricServiceClient; import com.google.common.base.Stopwatch; +import com.google.common.collect.BoundType; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Range; import com.google.monitoring.v3.ListTimeSeriesRequest; import com.google.monitoring.v3.ListTimeSeriesResponse; +import com.google.monitoring.v3.Point; import com.google.monitoring.v3.ProjectName; import com.google.monitoring.v3.TimeInterval; +import com.google.monitoring.v3.TimeSeries; +import com.google.protobuf.Timestamp; import com.google.protobuf.util.Timestamps; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.data.MetricData; +import 
io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.logging.Level; import java.util.logging.Logger; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Before; import org.junit.ClassRule; import org.junit.Rule; import org.junit.Test; @@ -50,6 +80,7 @@ import org.junit.runner.RunWith; import org.junit.runners.JUnit4; import org.threeten.bp.Duration; +import org.threeten.bp.Instant; @RunWith(JUnit4.class) public class BuiltinMetricsIT { @@ -58,71 +89,131 @@ public class BuiltinMetricsIT { private static final Logger logger = Logger.getLogger(BuiltinMetricsIT.class.getName()); @Rule public Timeout globalTimeout = Timeout.seconds(900); - private static Table table; - private static BigtableTableAdminClient tableAdminClient; - private static MetricServiceClient metricClient; + + private Table tableCustomOtel; + private Table tableDefault; + private BigtableDataClient clientCustomOtel; + private BigtableDataClient clientDefault; + private BigtableTableAdminClient tableAdminClient; + private BigtableInstanceAdminClient instanceAdminClient; + private MetricServiceClient metricClient; + + private InMemoryMetricReader metricReader; + private String appProfileCustomOtel; + private String appProfileDefault; public static String[] VIEWS = { "operation_latencies", "attempt_latencies", "connectivity_error_count", - "application_blocking_latencies" + "application_blocking_latencies", }; - @BeforeClass - public static void setUpClass() throws IOException { + @Before + public void setup() throws IOException { + // This test tests 2 things. End-to-end test using the default OTEL instance created by the + // client, and also end-to-end test using a custom OTEL instance set by the customer. In + // both tests, a BigtableCloudMonitoringExporter is created to export data to Cloud Monitoring. assume() .withMessage("Builtin metrics integration test is not supported by emulator") .that(testEnvRule.env()) .isNotInstanceOf(EmulatorEnv.class); - // Enable built in metrics - BigtableDataSettings.enableBuiltinMetrics(); - // Create a cloud monitoring client metricClient = MetricServiceClient.create(); tableAdminClient = testEnvRule.env().getTableAdminClient(); + instanceAdminClient = testEnvRule.env().getInstanceAdminClient(); + appProfileCustomOtel = PrefixGenerator.newPrefix("test1"); + appProfileDefault = PrefixGenerator.newPrefix("test2"); + instanceAdminClient.createAppProfile( + CreateAppProfileRequest.of(testEnvRule.env().getInstanceId(), appProfileCustomOtel) + .setRoutingPolicy( + AppProfile.SingleClusterRoutingPolicy.of(testEnvRule.env().getPrimaryClusterId())) + .setIsolationPolicy(AppProfile.StandardIsolationPolicy.of(AppProfile.Priority.LOW))); + instanceAdminClient.createAppProfile( + CreateAppProfileRequest.of(testEnvRule.env().getInstanceId(), appProfileDefault) + .setRoutingPolicy( + AppProfile.SingleClusterRoutingPolicy.of(testEnvRule.env().getPrimaryClusterId())) + .setIsolationPolicy(AppProfile.StandardIsolationPolicy.of(AppProfile.Priority.LOW))); + + // When using the custom OTEL instance, we can also register a InMemoryMetricReader on the + // SdkMeterProvider to verify the data exported on Cloud Monitoring with the in memory metric + // data collected in InMemoryMetricReader. 
+ metricReader = InMemoryMetricReader.create(); + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider); + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + + BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder(); + + clientCustomOtel = + BigtableDataClient.create( + settings + .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry)) + .setAppProfileId(appProfileCustomOtel) + .build()); + clientDefault = BigtableDataClient.create(settings.setAppProfileId(appProfileDefault).build()); } - @AfterClass - public static void tearDown() { + @After + public void tearDown() { if (metricClient != null) { metricClient.close(); } - if (table != null) { - tableAdminClient.deleteTable(table.getId()); + if (tableCustomOtel != null) { + tableAdminClient.deleteTable(tableCustomOtel.getId()); + } + if (tableDefault != null) { + tableAdminClient.deleteTable(tableDefault.getId()); + } + if (instanceAdminClient != null) { + instanceAdminClient.deleteAppProfile( + testEnvRule.env().getInstanceId(), appProfileCustomOtel, true); + instanceAdminClient.deleteAppProfile( + testEnvRule.env().getInstanceId(), appProfileDefault, true); + } + if (clientCustomOtel != null) { + clientCustomOtel.close(); + } + if (clientDefault != null) { + clientDefault.close(); } } @Test - public void testBuiltinMetrics() throws Exception { - logger.info("Started testing builtin metrics"); - table = + public void testBuiltinMetricsWithDefaultOTEL() throws Exception { + logger.info("Started testing builtin metrics with default OTEL"); + tableDefault = tableAdminClient.createTable( - CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test")) + CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test1")) .addFamily("cf")); - logger.info("Create table: " + table.getId()); - // Send a MutateRow and ReadRows request - testEnvRule - .env() - .getDataClient() - .mutateRow(RowMutation.create(table.getId(), "a-new-key").setCell("cf", "q", "abc")); + logger.info("Create default table: " + tableDefault.getId()); + + Instant start = Instant.now().minus(Duration.ofSeconds(10)); + + // Send a MutateRow and ReadRows request and measure the latencies for these requests. + clientDefault.mutateRow( + RowMutation.create(tableDefault.getId(), "a-new-key").setCell("cf", "q", "abc")); ArrayList rows = - Lists.newArrayList( - testEnvRule.env().getDataClient().readRows(Query.create(table.getId()).limit(10))); + Lists.newArrayList(clientDefault.readRows(Query.create(tableDefault.getId()).limit(10))); - Stopwatch stopwatch = Stopwatch.createStarted(); + // This stopwatch is used for to limit fetching of metric data in verifyMetrics + Stopwatch metricsPollingStopwatch = Stopwatch.createStarted(); ProjectName name = ProjectName.of(testEnvRule.env().getProjectId()); - // Restrict time to last 10 minutes and 5 minutes after the request - long startMillis = System.currentTimeMillis() - Duration.ofMinutes(10).toMillis(); - long endMillis = startMillis + Duration.ofMinutes(15).toMillis(); + // Interval is set in the monarch request when query metric timestamps. + // Restrict it to before we send to request and 3 minute after we send the request. If + // it turns out to be still flaky we can increase the filter range. 
+ Instant end = Instant.now().plus(Duration.ofMinutes(3)); TimeInterval interval = TimeInterval.newBuilder() - .setStartTime(Timestamps.fromMillis(startMillis)) - .setEndTime(Timestamps.fromMillis(endMillis)) + .setStartTime(Timestamps.fromMillis(start.toEpochMilli())) + .setEndTime(Timestamps.fromMillis(end.toEpochMilli())) .build(); for (String view : VIEWS) { @@ -132,42 +223,123 @@ public void testBuiltinMetrics() throws Exception { String.format( "metric.type=\"bigtable.googleapis.com/client/%s\" " + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.MutateRow\"" - + " AND resource.labels.table=\"%s\"", - view, testEnvRule.env().getInstanceId(), table.getId()); + + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"", + view, testEnvRule.env().getInstanceId(), tableDefault.getId(), appProfileDefault); ListTimeSeriesRequest.Builder requestBuilder = ListTimeSeriesRequest.newBuilder() .setName(name.toString()) .setFilter(metricFilter) .setInterval(interval) .setView(ListTimeSeriesRequest.TimeSeriesView.FULL); - - verifyMetricsArePublished(requestBuilder.build(), stopwatch, view); + verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view); // Verify that metrics are published for ReadRows request metricFilter = String.format( "metric.type=\"bigtable.googleapis.com/client/%s\" " + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.ReadRows\"" - + " AND resource.labels.table=\"%s\"", - view, testEnvRule.env().getInstanceId(), table.getId()); + + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"", + view, testEnvRule.env().getInstanceId(), tableDefault.getId(), appProfileDefault); + requestBuilder.setFilter(metricFilter); + + verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view); + } + } + + @Test + public void testBuiltinMetricsWithCustomOTEL() throws Exception { + logger.info("Started testing builtin metrics with custom OTEL"); + tableCustomOtel = + tableAdminClient.createTable( + CreateTableRequest.of(PrefixGenerator.newPrefix("BuiltinMetricsIT#test2")) + .addFamily("cf")); + logger.info("Create custom table: " + tableCustomOtel.getId()); + + Instant start = Instant.now().minus(Duration.ofSeconds(10)); + // Send a MutateRow and ReadRows request and measure the latencies for these requests. + clientCustomOtel.mutateRow( + RowMutation.create(tableCustomOtel.getId(), "a-new-key").setCell("cf", "q", "abc")); + ArrayList rows = + Lists.newArrayList( + clientCustomOtel.readRows(Query.create(tableCustomOtel.getId()).limit(10))); + + // This stopwatch is used to limit the fetching of metric data in verifyMetricsArePublished. + Stopwatch metricsPollingStopwatch = Stopwatch.createStarted(); + + ProjectName name = ProjectName.of(testEnvRule.env().getProjectId()); + + Collection fromMetricReader = metricReader.collectAllMetrics(); + + // The interval is set on the ListTimeSeries request when querying metric timestamps. + // Restrict it to shortly before we sent the request and 3 minutes after. If + // it turns out to still be flaky we can increase the filter range.
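+ // Note that fromMetricReader was collected above, before polling Cloud Monitoring, so for each + // view the published time series can be compared against this in-memory snapshot in + // verifyMetricsWithMetricsReader below.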
+ Instant end = start.plus(Duration.ofMinutes(3)); + TimeInterval interval = + TimeInterval.newBuilder() + .setStartTime(Timestamps.fromMillis(start.toEpochMilli())) + .setEndTime(Timestamps.fromMillis(end.toEpochMilli())) + .build(); + + for (String view : VIEWS) { + String otelMetricName = view; + if (view.equals("application_blocking_latencies")) { + otelMetricName = "application_latencies"; + } + MetricData dataFromReader = getMetricData(fromMetricReader, otelMetricName); + + // Filter on instance and method name + // Verify that metrics are correct for MutateRows request + String metricFilter = + String.format( + "metric.type=\"bigtable.googleapis.com/client/%s\" " + + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.MutateRow\"" + + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"", + view, + testEnvRule.env().getInstanceId(), + tableCustomOtel.getId(), + appProfileCustomOtel); + ListTimeSeriesRequest.Builder requestBuilder = + ListTimeSeriesRequest.newBuilder() + .setName(name.toString()) + .setFilter(metricFilter) + .setInterval(interval) + .setView(ListTimeSeriesRequest.TimeSeriesView.FULL); + + ListTimeSeriesResponse response = + verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view); + verifyMetricsWithMetricsReader(response, dataFromReader); + + // Verify that metrics are correct for ReadRows request + metricFilter = + String.format( + "metric.type=\"bigtable.googleapis.com/client/%s\" " + + "AND resource.labels.instance=\"%s\" AND metric.labels.method=\"Bigtable.ReadRows\"" + + " AND resource.labels.table=\"%s\" AND metric.labels.app_profile=\"%s\"", + view, + testEnvRule.env().getInstanceId(), + tableCustomOtel.getId(), + appProfileCustomOtel); requestBuilder.setFilter(metricFilter); - verifyMetricsArePublished(requestBuilder.build(), stopwatch, view); + response = verifyMetricsArePublished(requestBuilder.build(), metricsPollingStopwatch, view); + verifyMetricsWithMetricsReader(response, dataFromReader); } } - private void verifyMetricsArePublished( - ListTimeSeriesRequest request, Stopwatch stopwatch, String view) throws Exception { + private ListTimeSeriesResponse verifyMetricsArePublished( + ListTimeSeriesRequest request, Stopwatch metricsPollingStopwatch, String view) + throws Exception { ListTimeSeriesResponse response = metricClient.listTimeSeriesCallable().call(request); - logger.log( - Level.INFO, - "Checking for view " - + view - + ", has timeseries=" - + response.getTimeSeriesCount() - + " stopwatch elapsed " - + stopwatch.elapsed(TimeUnit.MINUTES)); - while (response.getTimeSeriesCount() == 0 && stopwatch.elapsed(TimeUnit.MINUTES) < 10) { + while (response.getTimeSeriesCount() == 0 + && metricsPollingStopwatch.elapsed(TimeUnit.MINUTES) < 10) { + logger.log( + Level.INFO, + "Checking for view " + + view + + ", has timeseries=" + + response.getTimeSeriesCount() + + " stopwatch elapsed " + + metricsPollingStopwatch.elapsed(TimeUnit.MINUTES)); // Call listTimeSeries every minute Thread.sleep(Duration.ofMinutes(1).toMillis()); response = metricClient.listTimeSeriesCallable().call(request); @@ -176,5 +348,64 @@ private void verifyMetricsArePublished( assertWithMessage("View " + view + " didn't return any data.") .that(response.getTimeSeriesCount()) .isGreaterThan(0); + + return response; + } + + private void verifyMetricsWithMetricsReader( + ListTimeSeriesResponse response, MetricData dataFromReader) { + for (TimeSeries ts : response.getTimeSeriesList()) { + Map attributesMap = + 
ImmutableMap.builder() + .putAll(ts.getResource().getLabelsMap()) + .putAll(ts.getMetric().getLabelsMap()) + .build(); + AttributesBuilder attributesBuilder = Attributes.builder(); + String streamingKey = BuiltinMetricsConstants.STREAMING_KEY.getKey(); + attributesMap.forEach( + (k, v) -> { + if (!k.equals(streamingKey)) { + attributesBuilder.put(k, v); + } + }); + if (attributesMap.containsKey(streamingKey)) { + attributesBuilder.put(streamingKey, Boolean.parseBoolean(attributesMap.get(streamingKey))); + } + Attributes attributes = attributesBuilder.build(); + verifyAttributes(dataFromReader, attributes); + long expectedValue = getAggregatedValue(dataFromReader, attributes); + Timestamp startTime = getStartTimeSeconds(dataFromReader, attributes); + assertThat(startTime.getSeconds()).isGreaterThan(0); + List point = + ts.getPointsList().stream() + .filter( + p -> + Timestamps.compare(p.getInterval().getStartTime(), startTime) >= 0 + && Timestamps.compare( + p.getInterval().getStartTime(), + Timestamps.add( + startTime, + com.google.protobuf.Duration.newBuilder() + .setSeconds(60) + .build())) + < 0) + .collect(Collectors.toList()); + if (point.size() > 0) { + long actualValue = (long) point.get(0).getValue().getDistributionValue().getMean(); + assertWithMessage( + "actual value does not match expected value, actual value " + + actualValue + + " expected value " + + expectedValue + + " actual start time " + + point.get(0).getInterval().getStartTime() + + " expected start time " + + startTime) + .that(actualValue) + .isIn( + Range.range( + expectedValue - 1, BoundType.CLOSED, expectedValue + 1, BoundType.CLOSED)); + } + } } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java new file mode 100644 index 0000000000..56f6bfa476 --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java @@ -0,0 +1,37 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.it; + +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants; +import com.google.common.truth.Correspondence; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.data.PointData; + +public class MetricsITUtils { + + static final Correspondence METRIC_DATA_NAME_CONTAINS = + Correspondence.from((md, s) -> md.getName().contains(s), "contains name"); + + static final Correspondence POINT_DATA_CLUSTER_ID_CONTAINS = + Correspondence.from( + (pd, s) -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY).contains(s), + "contains attributes"); + + static final Correspondence POINT_DATA_ZONE_ID_CONTAINS = + Correspondence.from( + (pd, s) -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY).contains(s), + "contains attributes"); +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/SampleRowsIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/SampleRowsIT.java index 81fd553c8e..5e5567e3b1 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/SampleRowsIT.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/SampleRowsIT.java @@ -15,21 +15,24 @@ */ package com.google.cloud.bigtable.data.v2.it; -import static com.google.cloud.bigtable.misc_utilities.AuthorizedViewTestHelper.AUTHORIZED_VIEW_COLUMN_QUALIFIER; -import static com.google.cloud.bigtable.misc_utilities.AuthorizedViewTestHelper.AUTHORIZED_VIEW_ROW_PREFIX; -import static com.google.cloud.bigtable.misc_utilities.AuthorizedViewTestHelper.createTestAuthorizedView; import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.TruthJUnit.assume; import com.google.api.core.ApiFuture; import com.google.api.core.ApiFutures; import com.google.cloud.bigtable.admin.v2.models.AuthorizedView; +import com.google.cloud.bigtable.admin.v2.models.CreateAuthorizedViewRequest; +import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest; +import com.google.cloud.bigtable.admin.v2.models.SubsetView; import com.google.cloud.bigtable.data.v2.BigtableDataClient; +import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId; import com.google.cloud.bigtable.data.v2.models.KeyOffset; import com.google.cloud.bigtable.data.v2.models.RowMutation; import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv; import com.google.cloud.bigtable.test_helpers.env.TestEnvRule; import com.google.common.collect.Lists; +import com.google.protobuf.ByteString; +import java.util.ArrayList; import java.util.List; import java.util.UUID; import java.util.concurrent.ExecutionException; @@ -75,42 +78,50 @@ public void testOnAuthorizedView() .withMessage("AuthorizedView is not supported on Emulator") .that(testEnvRule.env()) .isNotInstanceOf(EmulatorEnv.class); - - AuthorizedView testAuthorizedView = createTestAuthorizedView(testEnvRule); + AuthorizedView testAuthorizedView = createPreSplitTableAndAuthorizedView(); BigtableDataClient client = testEnvRule.env().getDataClient(); - String rowPrefix = AUTHORIZED_VIEW_ROW_PREFIX + UUID.randomUUID(); - String rowPrefixOutsideAuthorizedView = UUID.randomUUID() + "-outside-authorized-view"; - // Create some data so that sample row keys has something to show - List> futures = Lists.newArrayList(); - for (int i = 0; i < 10; i++) { - ApiFuture future = - client.mutateRowAsync( - RowMutation.create(testEnvRule.env().getTableId(), rowPrefix + "-" + i) - 
.setCell( - testEnvRule.env().getFamilyId(), AUTHORIZED_VIEW_COLUMN_QUALIFIER, "value")); - futures.add(future); - ApiFuture futureOutsideAuthorizedView = - client.mutateRowAsync( - RowMutation.create( - testEnvRule.env().getTableId(), rowPrefixOutsideAuthorizedView + "-" + i) - .setCell( - testEnvRule.env().getFamilyId(), AUTHORIZED_VIEW_COLUMN_QUALIFIER, "value")); - futures.add(futureOutsideAuthorizedView); + ApiFuture> future = + client.sampleRowKeysAsync( + AuthorizedViewId.of(testAuthorizedView.getTableId(), testAuthorizedView.getId())); + + List results = future.get(1, TimeUnit.MINUTES); + + List resultKeys = new ArrayList<>(); + for (KeyOffset keyOffset : results) { + resultKeys.add(keyOffset.getKey()); } - ApiFutures.allAsList(futures).get(1, TimeUnit.MINUTES); - ApiFuture> future = client.sampleRowKeysAsync(testEnvRule.env().getTableId()); + assertThat(resultKeys) + .containsExactly( + ByteString.copyFromUtf8("food"), + ByteString.copyFromUtf8("fool"), + ByteString.copyFromUtf8("fop")); - List results = future.get(1, TimeUnit.MINUTES); + testEnvRule + .env() + .getTableAdminClient() + .deleteAuthorizedView(testAuthorizedView.getTableId(), testAuthorizedView.getId()); + } - assertThat(results).isNotEmpty(); - assertThat(results.get(results.size() - 1).getOffsetBytes()).isGreaterThan(0L); + private static AuthorizedView createPreSplitTableAndAuthorizedView() { + String tableId = UUID.randomUUID().toString(); + String authorizedViewId = UUID.randomUUID().toString(); testEnvRule .env() .getTableAdminClient() - .deleteAuthorizedView(testEnvRule.env().getTableId(), testAuthorizedView.getId()); + .createTable( + CreateTableRequest.of(tableId) + .addSplit(ByteString.copyFromUtf8("apple")) + .addSplit(ByteString.copyFromUtf8("food")) + .addSplit(ByteString.copyFromUtf8("fool")) + .addSplit(ByteString.copyFromUtf8("good"))); + CreateAuthorizedViewRequest request = + CreateAuthorizedViewRequest.of(tableId, authorizedViewId) + .setAuthorizedViewType(SubsetView.create().addRowPrefix("foo")) + .setDeletionProtection(false); + return testEnvRule.env().getTableAdminClient().createAuthorizedView(request); } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java index b0e12d5ade..84ab24f1c8 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java @@ -15,37 +15,76 @@ */ package com.google.cloud.bigtable.data.v2.it; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.METRIC_DATA_NAME_CONTAINS; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_CLUSTER_ID_CONTAINS; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_ZONE_ID_CONTAINS; import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.TruthJUnit.assume; import com.google.api.core.ApiFuture; import com.google.api.gax.rpc.NotFoundException; import com.google.cloud.bigtable.admin.v2.models.Cluster; +import com.google.cloud.bigtable.data.v2.BigtableDataClient; +import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.models.Query; import com.google.cloud.bigtable.data.v2.models.Row; -import com.google.cloud.bigtable.stats.BuiltinViews; -import 
com.google.cloud.bigtable.stats.StatsWrapper; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView; +import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider; import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv; import com.google.cloud.bigtable.test_helpers.env.TestEnvRule; import com.google.common.collect.Lists; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.data.PointData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; +import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; -import org.junit.BeforeClass; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; public class StreamingMetricsMetadataIT { @ClassRule public static TestEnvRule testEnvRule = new TestEnvRule(); - @BeforeClass - public static void setUpClass() { + private BigtableDataClient client; + private InMemoryMetricReader metricReader; + + @Before + public void setup() throws IOException { assume() .withMessage("StreamingMetricsMetadataIT is not supported on Emulator") .that(testEnvRule.env()) .isNotInstanceOf(EmulatorEnv.class); - BuiltinViews.registerBigtableBuiltinViews(); + + BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder(); + + metricReader = InMemoryMetricReader.create(); + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider); + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + + settings.setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry)); + client = BigtableDataClient.create(settings.build()); + } + + @After + public void tearDown() throws IOException { + if (client != null) { + client.close(); + } } @Test @@ -54,7 +93,7 @@ public void testSuccess() throws Exception { String uniqueKey = prefix + "-read"; Query query = Query.create(testEnvRule.env().getTableId()).rowKey(uniqueKey); - ArrayList rows = Lists.newArrayList(testEnvRule.env().getDataClient().readRows(query)); + ArrayList rows = Lists.newArrayList(client.readRows(query)); ApiFuture> clustersFuture = testEnvRule @@ -64,27 +103,73 @@ public void testSuccess() throws Exception { List clusters = clustersFuture.get(1, TimeUnit.MINUTES); - // give opencensus some time to populate view data - Thread.sleep(100); + Collection allMetricData = metricReader.collectAllMetrics(); + List metrics = + metricReader.collectAllMetrics().stream() + .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME)) + .collect(Collectors.toList()); + + assertThat(allMetricData) + .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS) + .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME); + assertThat(metrics).hasSize(1); - List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings(); - assertThat(tagValueStrings).contains(clusters.get(0).getZone()); - 
assertThat(tagValueStrings).contains(clusters.get(0).getId()); + MetricData metricData = metrics.get(0); + List pointData = new ArrayList<>(metricData.getData().getPoints()); + List clusterAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY)) + .collect(Collectors.toList()); + List zoneAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY)) + .collect(Collectors.toList()); + + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS) + .contains(clusters.get(0).getId()); + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS) + .contains(clusters.get(0).getZone()); + assertThat(clusterAttributes).contains(clusters.get(0).getId()); + assertThat(zoneAttributes).contains(clusters.get(0).getZone()); } @Test - public void testFailure() throws InterruptedException { + public void testFailure() { Query query = Query.create("non-exist-table"); try { - Lists.newArrayList(testEnvRule.env().getDataClient().readRows(query)); + Lists.newArrayList(client.readRows(query)); } catch (NotFoundException e) { } - // give opencensus some time to populate view data - Thread.sleep(100); + Collection allMetricData = metricReader.collectAllMetrics(); + List metrics = + metricReader.collectAllMetrics().stream() + .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME)) + .collect(Collectors.toList()); + + assertThat(allMetricData) + .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS) + .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME); + assertThat(metrics).hasSize(1); + + MetricData metricData = metrics.get(0); + List pointData = new ArrayList<>(metricData.getData().getPoints()); + List clusterAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY)) + .collect(Collectors.toList()); + List zoneAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY)) + .collect(Collectors.toList()); - List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings(); - assertThat(tagValueStrings).contains("unspecified"); - assertThat(tagValueStrings).contains("global"); + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS) + .contains("unspecified"); + assertThat(pointData).comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS).contains("global"); + assertThat(clusterAttributes).contains("unspecified"); + assertThat(zoneAttributes).contains("global"); } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java index aa2a4317fc..ad5f71db8f 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java @@ -15,35 +15,76 @@ */ package com.google.cloud.bigtable.data.v2.it; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.METRIC_DATA_NAME_CONTAINS; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_CLUSTER_ID_CONTAINS; +import static com.google.cloud.bigtable.data.v2.it.MetricsITUtils.POINT_DATA_ZONE_ID_CONTAINS; import static com.google.common.truth.Truth.assertThat; import static com.google.common.truth.TruthJUnit.assume; import 
com.google.api.core.ApiFuture; import com.google.api.gax.rpc.NotFoundException; import com.google.cloud.bigtable.admin.v2.models.Cluster; +import com.google.cloud.bigtable.data.v2.BigtableDataClient; +import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.models.RowMutation; -import com.google.cloud.bigtable.stats.BuiltinViews; -import com.google.cloud.bigtable.stats.StatsWrapper; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants; +import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsView; +import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider; import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv; import com.google.cloud.bigtable.test_helpers.env.TestEnvRule; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.data.PointData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; import java.util.List; import java.util.UUID; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import org.junit.BeforeClass; +import java.util.stream.Collectors; +import org.junit.After; +import org.junit.Before; import org.junit.ClassRule; import org.junit.Test; public class UnaryMetricsMetadataIT { @ClassRule public static TestEnvRule testEnvRule = new TestEnvRule(); - @BeforeClass - public static void setUpClass() { + private BigtableDataClient client; + private InMemoryMetricReader metricReader; + + @Before + public void setup() throws IOException { assume() .withMessage("UnaryMetricsMetadataIT is not supported on Emulator") .that(testEnvRule.env()) .isNotInstanceOf(EmulatorEnv.class); - BuiltinViews.registerBigtableBuiltinViews(); + + BigtableDataSettings.Builder settings = testEnvRule.env().getDataClientSettings().toBuilder(); + + metricReader = InMemoryMetricReader.create(); + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + BuiltinMetricsView.registerBuiltinMetrics(testEnvRule.env().getProjectId(), meterProvider); + OpenTelemetry openTelemetry = + OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + + settings.setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(openTelemetry)); + + client = BigtableDataClient.create(settings.build()); + } + + @After + public void tearDown() throws IOException { + if (client != null) { + client.close(); + } } @Test @@ -52,9 +93,7 @@ public void testSuccess() throws Exception { String familyId = testEnvRule.env().getFamilyId(); ApiFuture future = - testEnvRule - .env() - .getDataClient() + client .mutateRowCallable() .futureCall( RowMutation.create(testEnvRule.env().getTableId(), rowKey) @@ -69,18 +108,36 @@ public void testSuccess() throws Exception { .listClustersAsync(testEnvRule.env().getInstanceId()); List clusters = clustersFuture.get(1, TimeUnit.MINUTES); - // give opencensus some time to populate view data - for (int i = 0; i < 10; i++) { - if (StatsWrapper.getOperationLatencyViewTagValueStrings() - .contains(clusters.get(0).getZone())) { - break; - } - Thread.sleep(100); - } + Collection allMetricData = metricReader.collectAllMetrics(); + List metrics = + 
allMetricData.stream() + .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME)) + .collect(Collectors.toList()); + + assertThat(allMetricData) + .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS) + .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME); + assertThat(metrics).hasSize(1); + + MetricData metricData = metrics.get(0); + List pointData = new ArrayList<>(metricData.getData().getPoints()); + List clusterAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY)) + .collect(Collectors.toList()); + List zoneAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY)) + .collect(Collectors.toList()); - List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings(); - assertThat(tagValueStrings).contains(clusters.get(0).getZone()); - assertThat(tagValueStrings).contains(clusters.get(0).getId()); + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS) + .contains(clusters.get(0).getId()); + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS) + .contains(clusters.get(0).getZone()); + assertThat(clusterAttributes).contains(clusters.get(0).getId()); + assertThat(zoneAttributes).contains(clusters.get(0).getZone()); } @Test @@ -89,9 +146,7 @@ public void testFailure() throws Exception { String familyId = testEnvRule.env().getFamilyId(); ApiFuture future = - testEnvRule - .env() - .getDataClient() + client .mutateRowCallable() .futureCall( RowMutation.create("non-exist-table", rowKey).setCell(familyId, "q", "myVal")); @@ -106,16 +161,39 @@ public void testFailure() throws Exception { } } - // give opencensus some time to populate view data - for (int i = 0; i < 10; i++) { - if (StatsWrapper.getOperationLatencyViewTagValueStrings().contains("unspecified")) { + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = null; + for (MetricData md : allMetricData) { + if (md.getName() + .equals( + BuiltinMetricsConstants.METER_NAME + + BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME)) { + metricData = md; break; } - Thread.sleep(100); } - List tagValueStrings = StatsWrapper.getOperationLatencyViewTagValueStrings(); - assertThat(tagValueStrings).contains("unspecified"); - assertThat(tagValueStrings).contains("global"); + assertThat(allMetricData) + .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS) + .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME); + assertThat(metricData).isNotNull(); + + List pointData = new ArrayList<>(metricData.getData().getPoints()); + + assertThat(pointData) + .comparingElementsUsing(POINT_DATA_CLUSTER_ID_CONTAINS) + .contains("unspecified"); + assertThat(pointData).comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS).contains("global"); + List clusterAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY)) + .collect(Collectors.toList()); + List zoneAttributes = + pointData.stream() + .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY)) + .collect(Collectors.toList()); + + assertThat(clusterAttributes).contains("unspecified"); + assertThat(zoneAttributes).contains("global"); } } diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java index 79cbccb0ac..290fcc321f 100644 --- 
a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java @@ -885,6 +885,7 @@ public void enableRetryInfoFalseValueTest() throws IOException { "generateInitialChangeStreamPartitionsSettings", "readChangeStreamSettings", "pingAndWarmSettings", + "metricsProvider", }; @Test diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java index 1975d0da25..abbf46c468 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java @@ -18,12 +18,9 @@ import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.assertThrows; -import com.google.api.gax.core.NoCredentialsProvider; import com.google.api.gax.grpc.GrpcStatusCode; -import com.google.api.gax.grpc.GrpcTransportChannel; import com.google.api.gax.rpc.ApiException; import com.google.api.gax.rpc.ErrorDetails; -import com.google.api.gax.rpc.FixedTransportChannelProvider; import com.google.api.gax.rpc.InternalException; import com.google.api.gax.rpc.UnavailableException; import com.google.bigtable.v2.BigtableGrpc; @@ -45,6 +42,7 @@ import com.google.bigtable.v2.SampleRowKeysResponse; import com.google.cloud.bigtable.data.v2.BigtableDataClient; import com.google.cloud.bigtable.data.v2.BigtableDataSettings; +import com.google.cloud.bigtable.data.v2.FakeServiceBuilder; import com.google.cloud.bigtable.data.v2.models.BulkMutation; import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation; import com.google.cloud.bigtable.data.v2.models.Filters; @@ -55,22 +53,31 @@ import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow; import com.google.cloud.bigtable.data.v2.models.RowMutation; import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; +import com.google.cloud.bigtable.data.v2.models.TableId; import com.google.common.base.Stopwatch; import com.google.common.collect.ImmutableList; import com.google.common.collect.Queues; import com.google.protobuf.Any; import com.google.rpc.RetryInfo; +import io.grpc.ForwardingServerCall; import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.Server; +import io.grpc.ServerCall; +import io.grpc.ServerCallHandler; +import io.grpc.ServerInterceptor; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.grpc.stub.StreamObserver; -import io.grpc.testing.GrpcServerRule; import java.io.IOException; import java.time.Duration; +import java.util.HashSet; import java.util.Queue; +import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; +import org.junit.After; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; @@ -78,12 +85,13 @@ @RunWith(JUnit4.class) public class RetryInfoTest { - @Rule public GrpcServerRule serverRule = new GrpcServerRule(); - private static final Metadata.Key ERROR_DETAILS_KEY = Metadata.Key.of("grpc-status-details-bin", Metadata.BINARY_BYTE_MARSHALLER); + private final Set methods = new HashSet<>(); + private FakeBigtableService service; + private Server server; private BigtableDataClient client; private 
BigtableDataSettings.Builder settings; @@ -94,29 +102,111 @@ public class RetryInfoTest { @Before public void setUp() throws IOException { service = new FakeBigtableService(); - serverRule.getServiceRegistry().addService(service); + + ServerInterceptor serverInterceptor = + new ServerInterceptor() { + @Override + public ServerCall.Listener interceptCall( + ServerCall serverCall, + Metadata metadata, + ServerCallHandler serverCallHandler) { + return serverCallHandler.startCall( + new ForwardingServerCall.SimpleForwardingServerCall(serverCall) { + @Override + public void close(Status status, Metadata trailers) { + if (trailers.containsKey(ERROR_DETAILS_KEY)) { + methods.add(serverCall.getMethodDescriptor().getBareMethodName()); + } + super.close(status, trailers); + } + }, + metadata); + } + }; + server = FakeServiceBuilder.create(service).intercept(serverInterceptor).start(); settings = - BigtableDataSettings.newBuilder() + BigtableDataSettings.newBuilderForEmulator(server.getPort()) .setProjectId("fake-project") - .setInstanceId("fake-instance") - .setCredentialsProvider(NoCredentialsProvider.create()); - - settings - .stubSettings() - .setTransportChannelProvider( - FixedTransportChannelProvider.create( - GrpcTransportChannel.create(serverRule.getChannel()))) - // channel priming doesn't work with FixedTransportChannelProvider. Disable it for the test - .setRefreshingChannel(false) - .build(); + .setInstanceId("fake-instance"); this.client = BigtableDataClient.create(settings.build()); } + @After + public void tearDown() { + if (client != null) { + client.close(); + } + if (server != null) { + server.shutdown(); + } + } + @Test - public void testReadRow() { - verifyRetryInfoIsUsed(() -> client.readRow("table", "row"), true); + public void testAllMethods() { + // Verify retry info is handled correctly for all the methods in data API. + verifyRetryInfoIsUsed(() -> client.readRow(TableId.of("table"), "row"), true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed( + () -> client.readRows(Query.create(TableId.of("table"))).iterator().hasNext(), true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed( + () -> + client.bulkMutateRows( + BulkMutation.create(TableId.of("fake-table")) + .add(RowMutationEntry.create("row-key-1").setCell("cf", "q", "v"))), + true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed( + () -> + client.mutateRow( + RowMutation.create(TableId.of("fake-table"), "key").setCell("cf", "q", "v")), + true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed(() -> client.sampleRowKeys(TableId.of("table")), true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed( + () -> + client.checkAndMutateRow( + ConditionalRowMutation.create("table", "key") + .condition(Filters.FILTERS.value().regex("old-value")) + .then(Mutation.create().setCell("cf", "q", "v"))), + true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed( + () -> + client.readModifyWriteRow( + ReadModifyWriteRow.create("table", "row").append("cf", "q", "v")), + true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed( + () -> client.readChangeStream(ReadChangeStreamQuery.create("table")).iterator().hasNext(), + true); + + attemptCounter.set(0); + verifyRetryInfoIsUsed( + () -> client.generateInitialChangeStreamPartitions("table").iterator().hasNext(), true); + + // Verify that the new data API methods are tested or excluded. This is enforced by + // introspecting grpc + // method descriptors. 
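+ // The server interceptor registered in setUp records the bare method name of every RPC whose + // response trailers carried error details, so comparing that set against the full gRPC service + // descriptor ensures a newly added data API method cannot silently skip this test.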
+ Set expected = + BigtableGrpc.getServiceDescriptor().getMethods().stream() + .map(MethodDescriptor::getBareMethodName) + .collect(Collectors.toSet()); + + // Exclude methods that don't support retry info + methods.add("PingAndWarm"); + + assertThat(methods).containsExactlyElementsIn(expected); } @Test @@ -147,11 +237,6 @@ public void testReadRowServerNotReturningRetryInfoClientDisabledHandling() throw } } - @Test - public void testReadRows() { - verifyRetryInfoIsUsed(() -> client.readRows(Query.create("table")).iterator().hasNext(), true); - } - @Test public void testReadRowsNonRetraybleErrorWithRetryInfo() { verifyRetryInfoIsUsed(() -> client.readRows(Query.create("table")).iterator().hasNext(), false); @@ -181,16 +266,6 @@ public void testReadRowsServerNotReturningRetryInfoClientDisabledHandling() thro } } - @Test - public void testMutateRows() { - verifyRetryInfoIsUsed( - () -> - client.bulkMutateRows( - BulkMutation.create("fake-table") - .add(RowMutationEntry.create("row-key-1").setCell("cf", "q", "v"))), - true); - } - @Test public void testMutateRowsNonRetryableErrorWithRetryInfo() { verifyRetryInfoIsUsed( @@ -238,12 +313,6 @@ public void testMutateRowsServerNotReturningRetryInfoClientDisabledHandling() th } } - @Test - public void testMutateRow() { - verifyRetryInfoIsUsed( - () -> client.mutateRow(RowMutation.create("table", "key").setCell("cf", "q", "v")), true); - } - @Test public void testMutateRowNonRetryableErrorWithRetryInfo() { verifyRetryInfoIsUsed( @@ -278,11 +347,6 @@ public void testMutateRowServerNotReturningRetryInfoClientDisabledHandling() thr } } - @Test - public void testSampleRowKeys() { - verifyRetryInfoIsUsed(() -> client.sampleRowKeys("table"), true); - } - @Test public void testSampleRowKeysNonRetryableErrorWithRetryInfo() { verifyRetryInfoIsUsed(() -> client.sampleRowKeys("table"), false); @@ -312,17 +376,6 @@ public void testSampleRowKeysServerNotReturningRetryInfoClientDisabledHandling() } } - @Test - public void testCheckAndMutateRow() { - verifyRetryInfoIsUsed( - () -> - client.checkAndMutateRow( - ConditionalRowMutation.create("table", "key") - .condition(Filters.FILTERS.value().regex("old-value")) - .then(Mutation.create().setCell("cf", "q", "v"))), - true); - } - @Test public void testCheckAndMutateDisableRetryInfo() throws IOException { settings.stubSettings().setEnableRetryInfo(false); @@ -368,15 +421,6 @@ public void testCheckAndMutateServerNotReturningRetryInfoClientDisabledHandling( } } - @Test - public void testReadModifyWrite() { - verifyRetryInfoIsUsed( - () -> - client.readModifyWriteRow( - ReadModifyWriteRow.create("table", "row").append("cf", "q", "v")), - true); - } - @Test public void testReadModifyWriteDisableRetryInfo() throws IOException { settings.stubSettings().setEnableRetryInfo(false); @@ -414,13 +458,6 @@ public void testReadModifyWriteNotReturningRetryInfoClientDisabledHandling() thr } } - @Test - public void testReadChangeStream() { - verifyRetryInfoIsUsed( - () -> client.readChangeStream(ReadChangeStreamQuery.create("table")).iterator().hasNext(), - true); - } - @Test public void testReadChangeStreamNonRetryableErrorWithRetryInfo() { verifyRetryInfoIsUsed( @@ -465,12 +502,6 @@ public void testReadChangeStreamNotReturningRetryInfoClientDisabledHandling() th } } - @Test - public void testGenerateInitialChangeStreamPartition() { - verifyRetryInfoIsUsed( - () -> client.generateInitialChangeStreamPartitions("table").iterator().hasNext(), true); - } - @Test public void testGenerateInitialChangeStreamPartitionNonRetryableError() { 
verifyRetryInfoIsUsed( diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java new file mode 100644 index 0000000000..a0b9c058dc --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java @@ -0,0 +1,310 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APP_PROFILE_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_UID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY; +import static com.google.common.truth.Truth.assertThat; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.google.api.Distribution; +import com.google.api.MonitoredResource; +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutures; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.monitoring.v3.MetricServiceClient; +import com.google.cloud.monitoring.v3.stub.MetricServiceStub; +import com.google.common.collect.ImmutableList; +import com.google.monitoring.v3.CreateTimeSeriesRequest; +import com.google.monitoring.v3.TimeSeries; +import com.google.protobuf.Empty; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.common.InstrumentationScopeInfo; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramPointData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableLongPointData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData; +import io.opentelemetry.sdk.metrics.internal.data.ImmutableSumData; +import io.opentelemetry.sdk.resources.Resource; +import java.util.Arrays; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import 
org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.junit.MockitoJUnit; +import org.mockito.junit.MockitoRule; + +public class BigtableCloudMonitoringExporterTest { + private static final String projectId = "fake-project"; + private static final String instanceId = "fake-instance"; + private static final String appProfileId = "default"; + private static final String tableId = "fake-table"; + private static final String zone = "us-east-1"; + private static final String cluster = "cluster-1"; + + private static final String clientName = "fake-client-name"; + private static final String taskId = "fake-task-id"; + + @Rule public final MockitoRule mockitoRule = MockitoJUnit.rule(); + + @Mock private MetricServiceStub mockMetricServiceStub; + private MetricServiceClient fakeMetricServiceClient; + private BigtableCloudMonitoringExporter exporter; + + private Attributes attributes; + private Resource resource; + private InstrumentationScopeInfo scope; + + @Before + public void setUp() { + fakeMetricServiceClient = new FakeMetricServiceClient(mockMetricServiceStub); + + exporter = + new BigtableCloudMonitoringExporter( + projectId, fakeMetricServiceClient, /* applicationResource= */ null, taskId); + + attributes = + Attributes.builder() + .put(BIGTABLE_PROJECT_ID_KEY, projectId) + .put(INSTANCE_ID_KEY, instanceId) + .put(TABLE_ID_KEY, tableId) + .put(CLUSTER_ID_KEY, cluster) + .put(ZONE_ID_KEY, zone) + .put(APP_PROFILE_KEY, appProfileId) + .build(); + + resource = Resource.create(Attributes.empty()); + + scope = InstrumentationScopeInfo.create(BuiltinMetricsConstants.METER_NAME); + } + + @After + public void tearDown() {} + + @Test + public void testExportingSumData() { + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = mock(UnaryCallable.class); + when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long fakeValue = 11L; + + long startEpoch = 10; + long endEpoch = 15; + LongPointData longPointData = + ImmutableLongPointData.create(startEpoch, endEpoch, attributes, fakeValue); + + MetricData longData = + ImmutableMetricData.createLongSum( + resource, + scope, + "bigtable.googleapis.com/internal/client/retry_count", + "description", + "1", + ImmutableSumData.create( + true, AggregationTemporality.CUMULATIVE, ImmutableList.of(longPointData))); + + exporter.export(Arrays.asList(longData)); + + CreateTimeSeriesRequest request = argumentCaptor.getValue(); + + assertThat(request.getTimeSeriesList()).hasSize(1); + + TimeSeries timeSeries = request.getTimeSeriesList().get(0); + + assertThat(timeSeries.getResource().getLabelsMap()) + .containsExactly( + BIGTABLE_PROJECT_ID_KEY.getKey(), projectId, + INSTANCE_ID_KEY.getKey(), instanceId, + TABLE_ID_KEY.getKey(), tableId, + CLUSTER_ID_KEY.getKey(), cluster, + ZONE_ID_KEY.getKey(), zone); + + assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2); + assertThat(timeSeries.getMetric().getLabelsMap()) + .containsAtLeast(APP_PROFILE_KEY.getKey(), appProfileId, CLIENT_UID_KEY.getKey(), taskId); + assertThat(timeSeries.getPoints(0).getValue().getInt64Value()).isEqualTo(fakeValue); + assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos()) + .isEqualTo(startEpoch); + 
assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch); + } + + @Test + public void testExportingHistogramData() { + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = mock(UnaryCallable.class); + when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long startEpoch = 10; + long endEpoch = 15; + HistogramPointData histogramPointData = + ImmutableHistogramPointData.create( + startEpoch, + endEpoch, + attributes, + 3d, + true, + 1d, // min + true, + 2d, // max + Arrays.asList(1.0), + Arrays.asList(1L, 2L)); + + MetricData histogramData = + ImmutableMetricData.createDoubleHistogram( + resource, + scope, + "bigtable.googleapis.com/internal/client/operation_latencies", + "description", + "ms", + ImmutableHistogramData.create( + AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData))); + + exporter.export(Arrays.asList(histogramData)); + + CreateTimeSeriesRequest request = argumentCaptor.getValue(); + + assertThat(request.getTimeSeriesList()).hasSize(1); + + TimeSeries timeSeries = request.getTimeSeriesList().get(0); + + assertThat(timeSeries.getResource().getLabelsMap()) + .containsExactly( + BIGTABLE_PROJECT_ID_KEY.getKey(), projectId, + INSTANCE_ID_KEY.getKey(), instanceId, + TABLE_ID_KEY.getKey(), tableId, + CLUSTER_ID_KEY.getKey(), cluster, + ZONE_ID_KEY.getKey(), zone); + + assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2); + assertThat(timeSeries.getMetric().getLabelsMap()) + .containsAtLeast(APP_PROFILE_KEY.getKey(), appProfileId, CLIENT_UID_KEY.getKey(), taskId); + Distribution distribution = timeSeries.getPoints(0).getValue().getDistributionValue(); + assertThat(distribution.getCount()).isEqualTo(3); + assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos()) + .isEqualTo(startEpoch); + assertThat(timeSeries.getPoints(0).getInterval().getEndTime().getNanos()).isEqualTo(endEpoch); + } + + @Test + public void testTimeSeriesForMetricWithGceOrGkeResource() { + String gceProjectId = "fake-gce-project"; + BigtableCloudMonitoringExporter exporter = + new BigtableCloudMonitoringExporter( + projectId, + fakeMetricServiceClient, + MonitoredResource.newBuilder() + .setType("gce-instance") + .putLabels("some-gce-key", "some-gce-value") + .putLabels("project_id", gceProjectId) + .build(), + taskId); + ArgumentCaptor argumentCaptor = + ArgumentCaptor.forClass(CreateTimeSeriesRequest.class); + + UnaryCallable mockCallable = mock(UnaryCallable.class); + when(mockMetricServiceStub.createServiceTimeSeriesCallable()).thenReturn(mockCallable); + ApiFuture future = ApiFutures.immediateFuture(Empty.getDefaultInstance()); + when(mockCallable.futureCall(argumentCaptor.capture())).thenReturn(future); + + long startEpoch = 10; + long endEpoch = 15; + HistogramPointData histogramPointData = + ImmutableHistogramPointData.create( + startEpoch, + endEpoch, + Attributes.of( + BIGTABLE_PROJECT_ID_KEY, + projectId, + INSTANCE_ID_KEY, + instanceId, + APP_PROFILE_KEY, + appProfileId, + CLIENT_NAME_KEY, + clientName), + 3d, + true, + 1d, // min + true, + 2d, // max + Arrays.asList(1.0), + Arrays.asList(1L, 2L)); + + MetricData histogramData = + ImmutableMetricData.createDoubleHistogram( + resource, + scope, + "bigtable.googleapis.com/internal/client/per_connection_error_count", 
+ "description", + "ms", + ImmutableHistogramData.create( + AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData))); + + exporter.export(Arrays.asList(histogramData)); + + CreateTimeSeriesRequest request = argumentCaptor.getValue(); + + assertThat(request.getName()).isEqualTo("projects/" + gceProjectId); + assertThat(request.getTimeSeriesList()).hasSize(1); + + com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0); + + assertThat(timeSeries.getResource().getLabelsMap()) + .containsExactly("some-gce-key", "some-gce-value", "project_id", gceProjectId); + + assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(5); + assertThat(timeSeries.getMetric().getLabelsMap()) + .containsAtLeast( + BIGTABLE_PROJECT_ID_KEY.getKey(), + projectId, + INSTANCE_ID_KEY.getKey(), + instanceId, + APP_PROFILE_KEY.getKey(), + appProfileId, + CLIENT_NAME_KEY.getKey(), + clientName, + CLIENT_UID_KEY.getKey(), + taskId); + } + + private static class FakeMetricServiceClient extends MetricServiceClient { + + protected FakeMetricServiceClient(MetricServiceStub stub) { + super(stub); + } + } +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java index 5d16b623fd..a12dd3cfbd 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java @@ -45,7 +45,6 @@ import com.google.cloud.bigtable.data.v2.models.SampleRowKeysRequest; import com.google.cloud.bigtable.data.v2.models.TableId; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; -import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings; import com.google.common.collect.ImmutableMap; import io.grpc.ForwardingServerCall.SimpleForwardingServerCall; import io.grpc.Metadata; @@ -126,16 +125,21 @@ public void sendHeaders(Metadata headers) { .setInstanceId(INSTANCE_ID) .setAppProfileId(APP_PROFILE_ID) .build(); - EnhancedBigtableStubSettings stubSettings = - settings - .getStubSettings() + + ClientContext clientContext = + EnhancedBigtableStub.createClientContext(settings.getStubSettings()); + clientContext = + clientContext .toBuilder() .setTracerFactory( EnhancedBigtableStub.createBigtableTracerFactory( - settings.getStubSettings(), Tags.getTagger(), localStats.getStatsRecorder())) + settings.getStubSettings(), + Tags.getTagger(), + localStats.getStatsRecorder(), + null)) .build(); - attempts = stubSettings.readRowsSettings().getRetrySettings().getMaxAttempts(); - stub = new EnhancedBigtableStub(stubSettings, ClientContext.create(stubSettings)); + attempts = settings.getStubSettings().readRowsSettings().getRetrySettings().getMaxAttempts(); + stub = new EnhancedBigtableStub(settings.getStubSettings(), clientContext); // Create another server without injecting the server-timing header and another stub that // connects to it. 
@@ -147,18 +151,21 @@ public void sendHeaders(Metadata headers) { .setInstanceId(INSTANCE_ID) .setAppProfileId(APP_PROFILE_ID) .build(); - EnhancedBigtableStubSettings noHeaderStubSettings = - noHeaderSettings - .getStubSettings() + + ClientContext noHeaderClientContext = + EnhancedBigtableStub.createClientContext(noHeaderSettings.getStubSettings()); + noHeaderClientContext = + noHeaderClientContext .toBuilder() .setTracerFactory( EnhancedBigtableStub.createBigtableTracerFactory( noHeaderSettings.getStubSettings(), Tags.getTagger(), - localStats.getStatsRecorder())) + localStats.getStatsRecorder(), + null)) .build(); noHeaderStub = - new EnhancedBigtableStub(noHeaderStubSettings, ClientContext.create(noHeaderStubSettings)); + new EnhancedBigtableStub(noHeaderSettings.getStubSettings(), noHeaderClientContext); } @After diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java new file mode 100644 index 0000000000..09b7e1f663 --- /dev/null +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java @@ -0,0 +1,112 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub.metrics; + +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.core.InternalApi; +import com.google.protobuf.Timestamp; +import com.google.protobuf.util.Timestamps; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import java.util.Collection; +import java.util.List; +import java.util.stream.Collectors; +import org.junit.Assert; + +@InternalApi +public class BuiltinMetricsTestUtils { + + private BuiltinMetricsTestUtils() {} + + public static MetricData getMetricData(Collection allMetricData, String metricName) { + List metricDataList = + allMetricData.stream() + .filter(md -> md.getName().equals(BuiltinMetricsConstants.METER_NAME + metricName)) + .collect(Collectors.toList()); + if (metricDataList.size() == 0) { + allMetricData.stream().forEach(md -> System.out.println(md.getName())); + } + assertThat(metricDataList.size()).isEqualTo(1); + + return metricDataList.get(0); + } + + public static long getAggregatedValue(MetricData metricData, Attributes attributes) { + switch (metricData.getType()) { + case HISTOGRAM: + HistogramPointData hd = + metricData.getHistogramData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()) + .get(0); + return (long) hd.getSum() / hd.getCount(); + case LONG_SUM: + LongPointData ld = + metricData.getLongSumData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()) + .get(0); + return ld.getValue(); + default: + return 0; + } + } + + public static Timestamp getStartTimeSeconds(MetricData metricData, Attributes attributes) { + switch (metricData.getType()) { + case HISTOGRAM: + HistogramPointData hd = + metricData.getHistogramData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()) + .get(0); + return Timestamps.fromNanos(hd.getStartEpochNanos()); + case LONG_SUM: + LongPointData ld = + metricData.getLongSumData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()) + .get(0); + return Timestamps.fromNanos(ld.getStartEpochNanos()); + default: + return Timestamp.getDefaultInstance(); + } + } + + public static void verifyAttributes(MetricData metricData, Attributes attributes) { + switch (metricData.getType()) { + case HISTOGRAM: + List hd = + metricData.getHistogramData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()); + assertThat(hd).isNotEmpty(); + break; + case LONG_SUM: + List ld = + metricData.getLongSumData().getPoints().stream() + .filter(pd -> pd.getAttributes().equals(attributes)) + .collect(Collectors.toList()); + assertThat(ld).isNotEmpty(); + break; + default: + Assert.fail("Unexpected type"); + } + } +} diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java index 06b923cad3..2dd4bcabb3 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java @@ -15,14 
+15,24 @@ */ package com.google.cloud.bigtable.data.v2.stub.metrics; -import static com.google.api.gax.tracing.ApiTracerFactory.OperationType; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METHOD_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STATUS_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STREAMING_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getAggregatedValue; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getMetricData; +import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.verifyAttributes; import static com.google.common.truth.Truth.assertThat; -import static org.junit.Assert.assertThrows; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; import com.google.api.client.util.Lists; import com.google.api.core.ApiFunction; @@ -36,7 +46,6 @@ import com.google.api.gax.rpc.NotFoundException; import com.google.api.gax.rpc.ResponseObserver; import com.google.api.gax.rpc.StreamController; -import com.google.api.gax.tracing.SpanName; import com.google.bigtable.v2.BigtableGrpc; import com.google.bigtable.v2.MutateRowRequest; import com.google.bigtable.v2.MutateRowResponse; @@ -45,6 +54,7 @@ import com.google.bigtable.v2.ReadRowsRequest; import com.google.bigtable.v2.ReadRowsResponse; import com.google.bigtable.v2.ResponseParams; +import com.google.cloud.bigtable.Version; import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.FakeServiceBuilder; import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId; @@ -52,9 +62,9 @@ import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowMutation; import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; +import com.google.cloud.bigtable.data.v2.models.TableId; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings; -import com.google.cloud.bigtable.stats.StatsRecorderWrapper; 
import com.google.common.base.Stopwatch; import com.google.common.collect.Range; import com.google.protobuf.ByteString; @@ -77,11 +87,21 @@ import io.grpc.StatusRuntimeException; import io.grpc.stub.ServerCallStreamObserver; import io.grpc.stub.StreamObserver; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.View; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; import java.nio.charset.Charset; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; @@ -92,12 +112,8 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.JUnit4; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; import org.mockito.junit.MockitoJUnit; import org.mockito.junit.MockitoRule; -import org.mockito.stubbing.Answer; import org.threeten.bp.Duration; @RunWith(JUnit4.class) @@ -105,8 +121,8 @@ public class BuiltinMetricsTracerTest { private static final String PROJECT_ID = "fake-project"; private static final String INSTANCE_ID = "fake-instance"; private static final String APP_PROFILE_ID = "default"; - private static final String TABLE_ID = "fake-table"; - private static final String AUTHORIZED_VIEW_ID = "fake-authorized-view"; + private static final String TABLE = "fake-table"; + private static final String BAD_TABLE_ID = "non-exist-table"; private static final String ZONE = "us-west-1"; private static final String CLUSTER = "cluster-0"; @@ -114,6 +130,7 @@ public class BuiltinMetricsTracerTest { private static final long SERVER_LATENCY = 100; private static final long APPLICATION_LATENCY = 200; private static final long SLEEP_VARIABILITY = 15; + private static final String CLIENT_NAME = "java-bigtable/" + Version.VERSION; private static final long CHANNEL_BLOCKING_LATENCY = 75; @@ -124,18 +141,35 @@ public class BuiltinMetricsTracerTest { private EnhancedBigtableStub stub; - @Mock private BuiltinMetricsTracerFactory mockFactory; - @Mock private StatsRecorderWrapper statsRecorderWrapper; + private int batchElementCount = 2; - @Captor private ArgumentCaptor status; - @Captor private ArgumentCaptor tableId; - @Captor private ArgumentCaptor zone; - @Captor private ArgumentCaptor cluster; + private Attributes baseAttributes; - private int batchElementCount = 2; + private InMemoryMetricReader metricReader; @Before public void setUp() throws Exception { + metricReader = InMemoryMetricReader.create(); + + baseAttributes = + Attributes.builder() + .put(BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY, PROJECT_ID) + .put(BuiltinMetricsConstants.INSTANCE_ID_KEY, INSTANCE_ID) + .put(BuiltinMetricsConstants.APP_PROFILE_KEY, APP_PROFILE_ID) + .build(); + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + + for (Map.Entry entry : + BuiltinMetricsConstants.getAllViews().entrySet()) { + meterProvider.registerView(entry.getKey(), entry.getValue()); + } + + OpenTelemetrySdk otel = + 
OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + BuiltinMetricsTracerFactory facotry = BuiltinMetricsTracerFactory.create(otel, baseAttributes); + // Add an interceptor to add server-timing in headers ServerInterceptor trailersInterceptor = new ServerInterceptor() { @@ -216,7 +250,8 @@ public void sendMessage(ReqT message) { .setMaxOutstandingRequestBytes(1001L) .build()) .build()); - stubSettingsBuilder.setTracerFactory(mockFactory); + + stubSettingsBuilder.setTracerFactory(facotry); InstantiatingGrpcChannelProvider.Builder channelProvider = ((InstantiatingGrpcChannelProvider) stubSettingsBuilder.getTransportChannelProvider()) @@ -247,117 +282,117 @@ public void tearDown() { @Test public void testReadRowsOperationLatencies() { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class); - Stopwatch stopwatch = Stopwatch.createStarted(); - Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE_ID)).iterator()); + Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE)).iterator()); long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS); - verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture()); - // verify record operation is only called once - verify(statsRecorderWrapper) - .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(STREAMING_KEY, true) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); + + Collection allMetricData = metricReader.collectAllMetrics(); + + MetricData metricData = getMetricData(allMetricData, OPERATION_LATENCIES_NAME); - assertThat(operationLatency.getValue()).isIn(Range.closed(SERVER_LATENCY, elapsed)); - assertThat(status.getAllValues()).containsExactly("OK"); - assertThat(tableId.getAllValues()).containsExactly(TABLE_ID); - assertThat(zone.getAllValues()).containsExactly(ZONE); - assertThat(cluster.getAllValues()).containsExactly(CLUSTER); + long value = getAggregatedValue(metricData, expectedAttributes); + assertThat(value).isIn(Range.closed(SERVER_LATENCY, elapsed)); } @Test public void testReadRowsOperationLatenciesOnAuthorizedView() { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class); - + String authorizedViewId = "test-authorized-view-id"; Stopwatch stopwatch = Stopwatch.createStarted(); Lists.newArrayList( - stub.readRowsCallable() - .call(Query.create(AuthorizedViewId.of(TABLE_ID, AUTHORIZED_VIEW_ID))) - .iterator()); + stub.readRowsCallable().call(Query.create(AuthorizedViewId.of(TABLE, authorizedViewId)))); long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS); - verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture()); - // verify record operation is only called once - verify(statsRecorderWrapper) - .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Attributes expectedAttributes = + 
baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(STREAMING_KEY, true) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); - assertThat(operationLatency.getValue()).isIn(Range.closed(SERVER_LATENCY, elapsed)); - assertThat(status.getAllValues()).containsExactly("OK"); - assertThat(tableId.getAllValues()).containsExactly(TABLE_ID); - assertThat(zone.getAllValues()).containsExactly(ZONE); - assertThat(cluster.getAllValues()).containsExactly(CLUSTER); + Collection allMetricData = metricReader.collectAllMetrics(); + + MetricData metricData = getMetricData(allMetricData, OPERATION_LATENCIES_NAME); + long value = getAggregatedValue(metricData, expectedAttributes); + assertThat(value).isIn(Range.closed(SERVER_LATENCY, elapsed)); } @Test public void testGfeMetrics() { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - ArgumentCaptor gfeLatency = ArgumentCaptor.forClass(Long.class); - ArgumentCaptor gfeMissingHeaders = ArgumentCaptor.forClass(Long.class); - - Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE_ID))); - - // Verify record attempt are called multiple times - verify(statsRecorderWrapper, times(fakeService.getAttemptCounter().get())) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); - - // The request was retried and gfe latency is only recorded in the retry attempt - verify(statsRecorderWrapper).putGfeLatencies(gfeLatency.capture()); - assertThat(gfeLatency.getValue()).isEqualTo(FAKE_SERVER_TIMING); - - // The first time the request was retried, it'll increment missing header counter - verify(statsRecorderWrapper, times(fakeService.getAttemptCounter().get())) - .putGfeMissingHeaders(gfeMissingHeaders.capture()); - assertThat(gfeMissingHeaders.getAllValues()).containsExactly(1L, 0L); - - assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "OK"); - assertThat(tableId.getAllValues()).containsExactly(TABLE_ID, TABLE_ID); - assertThat(zone.getAllValues()).containsExactly("global", ZONE); - assertThat(cluster.getAllValues()).containsExactly("unspecified", CLUSTER); + Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE))); + + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(METHOD_KEY, "Bigtable.ReadRows") + .build(); + + Collection allMetricData = metricReader.collectAllMetrics(); + + MetricData serverLatenciesMetricData = getMetricData(allMetricData, SERVER_LATENCIES_NAME); + + long serverLatencies = getAggregatedValue(serverLatenciesMetricData, expectedAttributes); + assertThat(serverLatencies).isEqualTo(FAKE_SERVER_TIMING); + + MetricData connectivityErrorCountMetricData = + getMetricData(allMetricData, CONNECTIVITY_ERROR_COUNT_NAME); + Attributes expected1 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "UNAVAILABLE") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, "global") + .put(CLUSTER_ID_KEY, "unspecified") + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); + Attributes expected2 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, 
ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); + + verifyAttributes(connectivityErrorCountMetricData, expected1); + verifyAttributes(connectivityErrorCountMetricData, expected2); + + assertThat(getAggregatedValue(connectivityErrorCountMetricData, expected1)).isEqualTo(1); + assertThat(getAggregatedValue(connectivityErrorCountMetricData, expected2)).isEqualTo(0); } @Test public void testReadRowsApplicationLatencyWithAutoFlowControl() throws Exception { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - - ArgumentCaptor applicationLatency = ArgumentCaptor.forClass(Long.class); - ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class); - final SettableApiFuture future = SettableApiFuture.create(); final AtomicInteger counter = new AtomicInteger(0); // For auto flow control, application latency is the time application spent in onResponse. stub.readRowsCallable() .call( - Query.create(TABLE_ID), + Query.create(TABLE), new ResponseObserver() { @Override public void onStart(StreamController streamController) {} @@ -383,37 +418,38 @@ public void onComplete() { }); future.get(); - verify(statsRecorderWrapper).putApplicationLatencies(applicationLatency.capture()); - verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture()); - verify(statsRecorderWrapper) - .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); - assertThat(counter.get()).isEqualTo(fakeService.getResponseCounter().get()); - // Thread.sleep might not sleep for the requested amount depending on the interrupt period - // defined by the OS. - // On linux this is ~1ms but on windows may be as high as 15-20ms. 
- assertThat(applicationLatency.getValue()) - .isAtLeast((APPLICATION_LATENCY - SLEEP_VARIABILITY) * counter.get()); - assertThat(applicationLatency.getValue()) - .isAtMost(operationLatency.getValue() - SERVER_LATENCY); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData applicationLatency = + getMetricData(allMetricData, APPLICATION_BLOCKING_LATENCIES_NAME); + + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(METHOD_KEY, "Bigtable.ReadRows") + .build(); + long value = getAggregatedValue(applicationLatency, expectedAttributes); + + assertThat(value).isAtLeast((APPLICATION_LATENCY - SLEEP_VARIABILITY) * counter.get()); + + MetricData operationLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME); + long operationLatencyValue = + getAggregatedValue( + operationLatency, + expectedAttributes.toBuilder().put(STATUS_KEY, "OK").put(STREAMING_KEY, true).build()); + assertThat(value).isAtMost(operationLatencyValue - SERVER_LATENCY); } @Test public void testReadRowsApplicationLatencyWithManualFlowControl() throws Exception { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - - ArgumentCaptor applicationLatency = ArgumentCaptor.forClass(Long.class); - ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class); int counter = 0; - Iterator rows = stub.readRowsCallable().call(Query.create(TABLE_ID)).iterator(); + Iterator rows = stub.readRowsCallable().call(Query.create(TABLE)).iterator(); while (rows.hasNext()) { counter++; @@ -421,148 +457,189 @@ public void testReadRowsApplicationLatencyWithManualFlowControl() throws Excepti rows.next(); } - verify(statsRecorderWrapper).putApplicationLatencies(applicationLatency.capture()); - verify(statsRecorderWrapper).putOperationLatencies(operationLatency.capture()); - verify(statsRecorderWrapper) - .recordOperation(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData applicationLatency = + getMetricData(allMetricData, APPLICATION_BLOCKING_LATENCIES_NAME); + + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(METHOD_KEY, "Bigtable.ReadRows") + .build(); - // For manual flow control, the last application latency shouldn't count, because at that point - // the server already sent back all the responses. + long value = getAggregatedValue(applicationLatency, expectedAttributes); + // For manual flow control, the last application latency shouldn't count, because at that + // point the server already sent back all the responses. 
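+ // Hence the lower bound asserted below: roughly APPLICATION_LATENCY for each of the first (counter - 1) rows, less one SERVER_LATENCY.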
assertThat(counter).isEqualTo(fakeService.getResponseCounter().get()); - assertThat(applicationLatency.getValue()) - .isAtLeast(APPLICATION_LATENCY * (counter - 1) - SERVER_LATENCY); - assertThat(applicationLatency.getValue()) - .isAtMost(operationLatency.getValue() - SERVER_LATENCY); + assertThat(value).isAtLeast(APPLICATION_LATENCY * (counter - 1) - SERVER_LATENCY); + + MetricData operationLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME); + long operationLatencyValue = + getAggregatedValue( + operationLatency, + expectedAttributes.toBuilder().put(STATUS_KEY, "OK").put(STREAMING_KEY, true).build()); + assertThat(value).isAtMost(operationLatencyValue - SERVER_LATENCY); } @Test - public void testRetryCount() { - when(mockFactory.newTracer(any(), any(), any())) - .thenAnswer( - (Answer) - invocationOnMock -> - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "MutateRow"), - statsRecorderWrapper)); - - ArgumentCaptor retryCount = ArgumentCaptor.forClass(Integer.class); - + public void testRetryCount() throws InterruptedException { stub.mutateRowCallable() - .call(RowMutation.create(TABLE_ID, "random-row").setCell("cf", "q", "value")); - - // In TracedUnaryCallable, we create a future and add a TraceFinisher to the callback. Main - // thread is blocked on waiting for the future to be completed. When onComplete is called on - // the grpc thread, the future is completed, however we might not have enough time for - // TraceFinisher to run. Add a 1 second time out to wait for the callback. This shouldn't have - // any impact on production code. - verify(statsRecorderWrapper, timeout(1000)).putRetryCount(retryCount.capture()); + .call(RowMutation.create(TABLE, "random-row").setCell("cf", "q", "value")); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = getMetricData(allMetricData, RETRY_COUNT_NAME); + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(METHOD_KEY, "Bigtable.MutateRow") + .put(STATUS_KEY, "OK") + .build(); - assertThat(retryCount.getValue()).isEqualTo(fakeService.getAttemptCounter().get() - 1); + long value = getAggregatedValue(metricData, expectedAttributes); + assertThat(value).isEqualTo(fakeService.getAttemptCounter().get() - 1); } @Test public void testMutateRowAttemptsTagValues() { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.Unary, SpanName.of("Bigtable", "MutateRow"), statsRecorderWrapper)); - stub.mutateRowCallable() - .call(RowMutation.create(TABLE_ID, "random-row").setCell("cf", "q", "value")); - - // Set a timeout to reduce flakiness of this test. BasicRetryingFuture will set - // attempt succeeded and set the response which will call complete() in AbstractFuture which - // calls releaseWaiters(). onOperationComplete() is called in TracerFinisher which will be - // called after the mutateRow call is returned. So there's a race between when the call returns - // and when the record() is called in onOperationCompletion(). 
- verify(statsRecorderWrapper, timeout(50).times(fakeService.getAttemptCounter().get())) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); - assertThat(zone.getAllValues()).containsExactly("global", "global", ZONE); - assertThat(cluster.getAllValues()).containsExactly("unspecified", "unspecified", CLUSTER); - assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "UNAVAILABLE", "OK"); - assertThat(tableId.getAllValues()).containsExactly(TABLE_ID, TABLE_ID, TABLE_ID); + .call(RowMutation.create(TABLE, "random-row").setCell("cf", "q", "value")); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME); + + Attributes expected1 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "UNAVAILABLE") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, "global") + .put(CLUSTER_ID_KEY, "unspecified") + .put(METHOD_KEY, "Bigtable.MutateRow") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, false) + .build(); + + Attributes expected2 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.MutateRow") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, false) + .build(); + + verifyAttributes(metricData, expected1); + verifyAttributes(metricData, expected2); } @Test public void testMutateRowsPartialError() throws InterruptedException { + Batcher batcher = stub.newMutateRowsBatcher(TableId.of(TABLE), null); int numMutations = 6; - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper)); - - Batcher batcher = stub.newMutateRowsBatcher(TABLE_ID, null); for (int i = 0; i < numMutations; i++) { String key = i % 2 == 0 ? 
"key" : "fail-key"; batcher.add(RowMutationEntry.create(key).setCell("f", "q", "v")); } - assertThrows(BatchingException.class, () -> batcher.close()); - - int expectedNumRequests = numMutations / batchElementCount; - verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests)) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Assert.assertThrows(BatchingException.class, batcher::close); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME); + + Attributes expected = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.MutateRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, false) + .build(); - assertThat(zone.getAllValues()).containsExactly(ZONE, ZONE, ZONE); - assertThat(cluster.getAllValues()).containsExactly(CLUSTER, CLUSTER, CLUSTER); - assertThat(status.getAllValues()).containsExactly("OK", "OK", "OK"); + verifyAttributes(metricData, expected); } @Test public void testMutateRowsRpcError() { + Batcher batcher = + stub.newMutateRowsBatcher(TableId.of(BAD_TABLE_ID), null); int numMutations = 6; - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper)); - - Batcher batcher = stub.newMutateRowsBatcher(BAD_TABLE_ID, null); for (int i = 0; i < numMutations; i++) { - batcher.add(RowMutationEntry.create("key").setCell("f", "q", "v")); + String key = i % 2 == 0 ? "key" : "fail-key"; + batcher.add(RowMutationEntry.create(key).setCell("f", "q", "v")); } - assertThrows(BatchingException.class, () -> batcher.close()); - - int expectedNumRequests = numMutations / batchElementCount; - verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests)) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Assert.assertThrows(BatchingException.class, batcher::close); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME); + + Attributes expected = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "NOT_FOUND") + .put(TABLE_ID_KEY, BAD_TABLE_ID) + .put(ZONE_ID_KEY, "global") + .put(CLUSTER_ID_KEY, "unspecified") + .put(METHOD_KEY, "Bigtable.MutateRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, false) + .build(); - assertThat(zone.getAllValues()).containsExactly("global", "global", "global"); - assertThat(cluster.getAllValues()).containsExactly("unspecified", "unspecified", "unspecified"); - assertThat(status.getAllValues()).containsExactly("NOT_FOUND", "NOT_FOUND", "NOT_FOUND"); + verifyAttributes(metricData, expected); } @Test public void testReadRowsAttemptsTagValues() { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - Lists.newArrayList(stub.readRowsCallable().call(Query.create("fake-table")).iterator()); - // Set a timeout to reduce flakiness of this test. BasicRetryingFuture will set - // attempt succeeded and set the response which will call complete() in AbstractFuture which - // calls releaseWaiters(). onOperationComplete() is called in TracerFinisher which will be - // called after the mutateRow call is returned. 
So there's a race between when the call returns - // and when the record() is called in onOperationCompletion(). - verify(statsRecorderWrapper, timeout(50).times(fakeService.getAttemptCounter().get())) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); - assertThat(zone.getAllValues()).containsExactly("global", ZONE); - assertThat(cluster.getAllValues()).containsExactly("unspecified", CLUSTER); - assertThat(status.getAllValues()).containsExactly("UNAVAILABLE", "OK"); + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData metricData = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME); + + Attributes expected1 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "UNAVAILABLE") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, "global") + .put(CLUSTER_ID_KEY, "unspecified") + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, true) + .build(); + + Attributes expected2 = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "OK") + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .put(STREAMING_KEY, true) + .build(); + + verifyAttributes(metricData, expected1); + verifyAttributes(metricData, expected2); } @Test public void testBatchBlockingLatencies() throws InterruptedException { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.Unary, SpanName.of("Bigtable", "MutateRows"), statsRecorderWrapper)); - try (Batcher batcher = stub.newMutateRowsBatcher(TABLE_ID, null)) { + try (Batcher batcher = stub.newMutateRowsBatcher(TABLE, null)) { for (int i = 0; i < 6; i++) { batcher.add(RowMutationEntry.create("key").setCell("f", "q", "v")); } @@ -571,86 +648,100 @@ public void testBatchBlockingLatencies() throws InterruptedException { batcher.close(); int expectedNumRequests = 6 / batchElementCount; - ArgumentCaptor throttledTime = ArgumentCaptor.forClass(Long.class); - verify(statsRecorderWrapper, timeout(1000).times(expectedNumRequests)) - .putClientBlockingLatencies(throttledTime.capture()); - // After the first request is sent, batcher will block on add because of the server latency. - // Blocking latency should be around server latency. - assertThat(throttledTime.getAllValues().get(1)).isAtLeast(SERVER_LATENCY - 10); - assertThat(throttledTime.getAllValues().get(2)).isAtLeast(SERVER_LATENCY - 10); + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData applicationLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME); - verify(statsRecorderWrapper, timeout(100).times(expectedNumRequests)) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + Attributes expectedAttributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(ZONE_ID_KEY, ZONE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(METHOD_KEY, "Bigtable.MutateRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); - assertThat(zone.getAllValues()).containsExactly(ZONE, ZONE, ZONE); - assertThat(cluster.getAllValues()).containsExactly(CLUSTER, CLUSTER, CLUSTER); + long value = getAggregatedValue(applicationLatency, expectedAttributes); + // After the first request is sent, batcher will block on add because of the server latency. + // Blocking latency should be around server latency. So each data point would be at least + // (SERVER_LATENCY - 10). 
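+ // getAggregatedValue() reports the mean across the recorded points, and the first request is not blocked, so the expected mean is scaled by (expectedNumRequests - 1) / expectedNumRequests.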
+ long expected = (SERVER_LATENCY - 10) * (expectedNumRequests - 1) / expectedNumRequests; + assertThat(value).isAtLeast(expected); } } @Test - public void testQueuedOnChannelServerStreamLatencies() throws InterruptedException { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - - stub.readRowsCallable().all().call(Query.create(TABLE_ID)); - - ArgumentCaptor blockedTime = ArgumentCaptor.forClass(Long.class); - - verify(statsRecorderWrapper, timeout(1000).times(fakeService.attemptCounter.get())) - .putClientBlockingLatencies(blockedTime.capture()); + public void testQueuedOnChannelServerStreamLatencies() { + stub.readRowsCallable().all().call(Query.create(TABLE)); + + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData clientLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME); + + Attributes attributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(ZONE_ID_KEY, ZONE) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); - assertThat(blockedTime.getAllValues().get(1)).isAtLeast(CHANNEL_BLOCKING_LATENCY); + long value = getAggregatedValue(clientLatency, attributes); + assertThat(value).isAtLeast(CHANNEL_BLOCKING_LATENCY); } @Test - public void testQueuedOnChannelUnaryLatencies() throws InterruptedException { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.Unary, SpanName.of("Bigtable", "MutateRow"), statsRecorderWrapper)); - stub.mutateRowCallable().call(RowMutation.create(TABLE_ID, "a-key").setCell("f", "q", "v")); + public void testQueuedOnChannelUnaryLatencies() { - ArgumentCaptor blockedTime = ArgumentCaptor.forClass(Long.class); + stub.mutateRowCallable().call(RowMutation.create(TABLE, "a-key").setCell("f", "q", "v")); - verify(statsRecorderWrapper, timeout(1000).times(fakeService.attemptCounter.get())) - .putClientBlockingLatencies(blockedTime.capture()); + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData clientLatency = getMetricData(allMetricData, CLIENT_BLOCKING_LATENCIES_NAME); - assertThat(blockedTime.getAllValues().get(1)).isAtLeast(CHANNEL_BLOCKING_LATENCY); - assertThat(blockedTime.getAllValues().get(2)).isAtLeast(CHANNEL_BLOCKING_LATENCY); + Attributes attributes = + baseAttributes + .toBuilder() + .put(TABLE_ID_KEY, TABLE) + .put(CLUSTER_ID_KEY, CLUSTER) + .put(ZONE_ID_KEY, ZONE) + .put(METHOD_KEY, "Bigtable.MutateRow") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); + + long expected = CHANNEL_BLOCKING_LATENCY * 2 / 3; + long actual = getAggregatedValue(clientLatency, attributes); + assertThat(actual).isAtLeast(expected); } @Test public void testPermanentFailure() { - when(mockFactory.newTracer(any(), any(), any())) - .thenReturn( - new BuiltinMetricsTracer( - OperationType.ServerStreaming, - SpanName.of("Bigtable", "ReadRows"), - statsRecorderWrapper)); - try { Lists.newArrayList(stub.readRowsCallable().call(Query.create(BAD_TABLE_ID)).iterator()); Assert.fail("Request should throw not found error"); } catch (NotFoundException e) { } - ArgumentCaptor attemptLatency = ArgumentCaptor.forClass(Long.class); - ArgumentCaptor operationLatency = ArgumentCaptor.forClass(Long.class); + Collection allMetricData = metricReader.collectAllMetrics(); + MetricData attemptLatency = getMetricData(allMetricData, ATTEMPT_LATENCIES_NAME); + 
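+ // The failed ReadRows never gets zone/cluster information back, so the "global" / "unspecified" defaults are expected, with NOT_FOUND recorded on both attempt and operation latencies.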
+ Attributes expected = + baseAttributes + .toBuilder() + .put(STATUS_KEY, "NOT_FOUND") + .put(TABLE_ID_KEY, BAD_TABLE_ID) + .put(CLUSTER_ID_KEY, "unspecified") + .put(ZONE_ID_KEY, "global") + .put(STREAMING_KEY, true) + .put(METHOD_KEY, "Bigtable.ReadRows") + .put(CLIENT_NAME_KEY, CLIENT_NAME) + .build(); - verify(statsRecorderWrapper, timeout(50)).putAttemptLatencies(attemptLatency.capture()); - verify(statsRecorderWrapper, timeout(50)).putOperationLatencies(operationLatency.capture()); - verify(statsRecorderWrapper, timeout(50)) - .recordAttempt(status.capture(), tableId.capture(), zone.capture(), cluster.capture()); + verifyAttributes(attemptLatency, expected); - assertThat(status.getValue()).isEqualTo("NOT_FOUND"); - assertThat(tableId.getValue()).isEqualTo(BAD_TABLE_ID); - assertThat(cluster.getValue()).isEqualTo("unspecified"); - assertThat(zone.getValue()).isEqualTo("global"); + MetricData opLatency = getMetricData(allMetricData, OPERATION_LATENCIES_NAME); + verifyAttributes(opLatency, expected); } private static class FakeService extends BigtableGrpc.BigtableImplBase { diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java index a6670182b8..4ab19a5337 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ErrorCountPerConnectionTest.java @@ -23,17 +23,29 @@ import com.google.api.gax.grpc.ChannelPoolSettings; import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; import com.google.bigtable.v2.*; +import com.google.cloud.bigtable.Version; import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.FakeServiceBuilder; import com.google.cloud.bigtable.data.v2.models.*; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings; -import com.google.cloud.bigtable.stats.StatsRecorderWrapperForConnection; import io.grpc.Server; import io.grpc.Status; import io.grpc.StatusRuntimeException; import io.grpc.stub.StreamObserver; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.InstrumentSelector; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import io.opentelemetry.sdk.metrics.View; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader; +import java.util.ArrayList; +import java.util.Collection; import java.util.List; +import java.util.Map; import java.util.concurrent.ScheduledExecutorService; import org.junit.After; import org.junit.Before; @@ -51,25 +63,50 @@ public class ErrorCountPerConnectionTest { private final FakeService fakeService = new FakeService(); private EnhancedBigtableStubSettings.Builder builder; private ArgumentCaptor runnableCaptor; - private StatsRecorderWrapperForConnection statsRecorderWrapperForConnection; + + private InMemoryMetricReader metricReader; + + private Attributes attributes; @Before public void setup() throws Exception { server = FakeServiceBuilder.create(fakeService).start(); 
ScheduledExecutorService executors = Mockito.mock(ScheduledExecutorService.class); + + attributes = + Attributes.builder() + .put(BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY, "fake-project") + .put(BuiltinMetricsConstants.INSTANCE_ID_KEY, "fake-instance") + .put(BuiltinMetricsConstants.APP_PROFILE_KEY, "") + .put(BuiltinMetricsConstants.CLIENT_NAME_KEY, "bigtable-java/" + Version.VERSION) + .build(); + + metricReader = InMemoryMetricReader.create(); + + SdkMeterProviderBuilder meterProvider = + SdkMeterProvider.builder().registerMetricReader(metricReader); + + for (Map.Entry entry : + BuiltinMetricsConstants.getAllViews().entrySet()) { + meterProvider.registerView(entry.getKey(), entry.getValue()); + } + + OpenTelemetrySdk otel = + OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + builder = BigtableDataSettings.newBuilderForEmulator(server.getPort()) .stubSettings() .setBackgroundExecutorProvider(FixedExecutorProvider.create(executors)) .setProjectId("fake-project") - .setInstanceId("fake-instance"); + .setInstanceId("fake-instance") + .setMetricsProvider(CustomOpenTelemetryMetricsProvider.create(otel)); + runnableCaptor = ArgumentCaptor.forClass(Runnable.class); Mockito.when( executors.scheduleAtFixedRate(runnableCaptor.capture(), anyLong(), anyLong(), any())) .thenReturn(null); - - statsRecorderWrapperForConnection = Mockito.mock(StatsRecorderWrapperForConnection.class); } @After @@ -98,14 +135,21 @@ public void readWithOneChannel() throws Exception { // noop } } - ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class); - Mockito.doNothing() - .when(statsRecorderWrapperForConnection) - .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture()); + runInterceptorTasksAndAssertCount(); - List allErrorCounts = errorCountCaptor.getAllValues(); - assertThat(allErrorCounts.size()).isEqualTo(1); - assertThat(allErrorCounts.get(0)).isEqualTo(errorCount); + + Collection allMetrics = metricReader.collectAllMetrics(); + MetricData metricData = + BuiltinMetricsTestUtils.getMetricData( + allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME); + + // Make sure the correct bucket is updated with the correct number of data points + ArrayList histogramPointData = + new ArrayList<>(metricData.getHistogramData().getPoints()); + assertThat(histogramPointData.size()).isEqualTo(1); + HistogramPointData point = histogramPointData.get(0); + int index = findDataPointIndex(point.getBoundaries(), errorCount); + assertThat(point.getCounts().get(index)).isEqualTo(1); } @Test @@ -131,28 +175,35 @@ public void readWithTwoChannels() throws Exception { // noop } } - ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class); - Mockito.doNothing() - .when(statsRecorderWrapperForConnection) - .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture()); runInterceptorTasksAndAssertCount(); - List allErrorCounts = errorCountCaptor.getAllValues(); - assertThat(allErrorCounts.size()).isEqualTo(2); - // Requests get assigned to channels using a Round Robin algorithm, so half to each. - assertThat(allErrorCounts).containsExactly(totalErrorCount / 2, totalErrorCount / 2); + long errorCountPerChannel = totalErrorCount / 2; + + Collection allMetrics = metricReader.collectAllMetrics(); + MetricData metricData = + BuiltinMetricsTestUtils.getMetricData( + allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME); + + // The 2 channels should get equal amount of errors, so the totalErrorCount / 2 bucket is + // updated twice. 
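+ // That is, a single histogram point is expected, with a count of 2 in the bucket that contains errorCountPerChannel.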
+ ArrayList histogramPointData = + new ArrayList<>(metricData.getHistogramData().getPoints()); + assertThat(histogramPointData.size()).isEqualTo(1); + HistogramPointData point = histogramPointData.get(0); + int index = findDataPointIndex(point.getBoundaries(), errorCountPerChannel); + assertThat(point.getCounts().get(index)).isEqualTo(2); } @Test public void readOverTwoPeriods() throws Exception { EnhancedBigtableStub stub = EnhancedBigtableStub.create(builder.build()); - long errorCount = 0; + long errorCount1 = 0; for (int i = 0; i < 20; i++) { Query query; if (i % 3 == 0) { query = Query.create(ERROR_TABLE_NAME); - errorCount += 1; + errorCount1 += 1; } else { query = Query.create(SUCCESS_TABLE_NAME); } @@ -162,16 +213,9 @@ public void readOverTwoPeriods() throws Exception { // noop } } - ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class); - Mockito.doNothing() - .when(statsRecorderWrapperForConnection) - .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture()); - runInterceptorTasksAndAssertCount(); - List allErrorCounts = errorCountCaptor.getAllValues(); - assertThat(allErrorCounts.size()).isEqualTo(1); - assertThat(allErrorCounts.get(0)).isEqualTo(errorCount); - errorCount = 0; + runInterceptorTasksAndAssertCount(); + long errorCount2 = 0; for (int i = 0; i < 20; i++) { Query query; @@ -179,7 +223,7 @@ public void readOverTwoPeriods() throws Exception { query = Query.create(SUCCESS_TABLE_NAME); } else { query = Query.create(ERROR_TABLE_NAME); - errorCount += 1; + errorCount2 += 1; } try { stub.readRowsCallable().call(query).iterator().hasNext(); @@ -187,27 +231,22 @@ public void readOverTwoPeriods() throws Exception { // noop } } - errorCountCaptor = ArgumentCaptor.forClass(long.class); - Mockito.doNothing() - .when(statsRecorderWrapperForConnection) - .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture()); + runInterceptorTasksAndAssertCount(); - allErrorCounts = errorCountCaptor.getAllValues(); - assertThat(allErrorCounts.size()).isEqualTo(1); - assertThat(allErrorCounts.get(0)).isEqualTo(errorCount); - } - @Test - public void ignoreInactiveConnection() throws Exception { - EnhancedBigtableStub stub = EnhancedBigtableStub.create(builder.build()); + Collection allMetrics = metricReader.collectAllMetrics(); + MetricData metricData = + BuiltinMetricsTestUtils.getMetricData( + allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME); - ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class); - Mockito.doNothing() - .when(statsRecorderWrapperForConnection) - .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture()); - runInterceptorTasksAndAssertCount(); - List allErrorCounts = errorCountCaptor.getAllValues(); - assertThat(allErrorCounts).isEmpty(); + ArrayList histogramPointData = + new ArrayList<>(metricData.getHistogramData().getPoints()); + assertThat(histogramPointData.size()).isEqualTo(1); + HistogramPointData point = histogramPointData.get(0); + int index1 = findDataPointIndex(point.getBoundaries(), errorCount1); + int index2 = findDataPointIndex(point.getBoundaries(), errorCount2); + assertThat(point.getCounts().get(index1)).isEqualTo(1); + assertThat(point.getCounts().get(index2)).isEqualTo(1); } @Test @@ -221,22 +260,19 @@ public void noFailedRequests() throws Exception { // noop } } - ArgumentCaptor errorCountCaptor = ArgumentCaptor.forClass(long.class); - Mockito.doNothing() - .when(statsRecorderWrapperForConnection) - .putAndRecordPerConnectionErrorCount(errorCountCaptor.capture()); 
runInterceptorTasksAndAssertCount(); - List allErrorCounts = errorCountCaptor.getAllValues(); - assertThat(allErrorCounts.size()).isEqualTo(1); - assertThat(allErrorCounts.get(0)).isEqualTo(0); + Collection allMetrics = metricReader.collectAllMetrics(); + MetricData metricData = + BuiltinMetricsTestUtils.getMetricData( + allMetrics, BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME); + long value = BuiltinMetricsTestUtils.getAggregatedValue(metricData, attributes); + assertThat(value).isEqualTo(0); } private void runInterceptorTasksAndAssertCount() { int actualNumOfTasks = 0; for (Runnable runnable : runnableCaptor.getAllValues()) { if (runnable instanceof ErrorCountPerConnectionMetricTracker) { - ((ErrorCountPerConnectionMetricTracker) runnable) - .setStatsRecorderWrapperForConnection(statsRecorderWrapperForConnection); runnable.run(); actualNumOfTasks++; } @@ -244,6 +280,16 @@ private void runInterceptorTasksAndAssertCount() { assertThat(actualNumOfTasks).isEqualTo(1); } + private int findDataPointIndex(List boundaries, long dataPoint) { + int index = 0; + for (; index < boundaries.size(); index++) { + if (boundaries.get(index) >= dataPoint) { + break; + } + } + return index; + } + static class FakeService extends BigtableGrpc.BigtableImplBase { @Override public void readRows( diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java index 15bd9171f0..d72eac4056 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java @@ -39,7 +39,6 @@ import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; -import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings; import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor; import com.google.common.base.Stopwatch; import com.google.common.collect.ImmutableMap; @@ -120,15 +119,20 @@ public void setUp() throws Exception { .setInstanceId(INSTANCE_ID) .setAppProfileId(APP_PROFILE_ID) .build(); - EnhancedBigtableStubSettings stubSettings = - settings - .getStubSettings() + + ClientContext clientContext = + EnhancedBigtableStub.createClientContext(settings.getStubSettings()); + clientContext = + clientContext .toBuilder() .setTracerFactory( EnhancedBigtableStub.createBigtableTracerFactory( - settings.getStubSettings(), Tags.getTagger(), localStats.getStatsRecorder())) + settings.getStubSettings(), + Tags.getTagger(), + localStats.getStatsRecorder(), + null)) .build(); - stub = new EnhancedBigtableStub(stubSettings, ClientContext.create(stubSettings)); + stub = new EnhancedBigtableStub(settings.getStubSettings(), clientContext); } @After diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/test_helpers/env/TestEnvRule.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/test_helpers/env/TestEnvRule.java index d4470637af..3b2ebb151c 100644 --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/test_helpers/env/TestEnvRule.java +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/test_helpers/env/TestEnvRule.java @@ -24,6 +24,7 @@ import 
com.google.cloud.bigtable.admin.v2.models.AppProfile; import com.google.cloud.bigtable.admin.v2.models.Cluster; import com.google.cloud.bigtable.admin.v2.models.Instance; +import com.google.cloud.bigtable.admin.v2.models.UpdateAuthorizedViewRequest; import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.base.Strings; @@ -203,6 +204,7 @@ private void cleanupStaleTables(String stalePrefix) { continue; } if (stalePrefix.compareTo(tableId) > 0) { + prepTableForDelete(tableId); try { env().getTableAdminClient().deleteTable(tableId); } catch (NotFoundException ignored) { @@ -212,6 +214,21 @@ private void cleanupStaleTables(String stalePrefix) { } } + private void prepTableForDelete(String tableId) { + // Unprotected views + if (!(env() instanceof EmulatorEnv)) { + for (String viewId : env().getTableAdminClient().listAuthorizedViews(tableId)) { + try { + env() + .getTableAdminClient() + .updateAuthorizedView( + UpdateAuthorizedViewRequest.of(tableId, viewId).setDeletionProtection(false)); + } catch (NotFoundException ignored) { + } + } + } + } + /** * Clean up AppProfile that were dynamically created in the default instance that have been * orphaned. diff --git a/grpc-google-cloud-bigtable-admin-v2/pom.xml b/grpc-google-cloud-bigtable-admin-v2/pom.xml index 4606ec8b84..c356f47fed 100644 --- a/grpc-google-cloud-bigtable-admin-v2/pom.xml +++ b/grpc-google-cloud-bigtable-admin-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigtable-admin-v2 - 2.37.0 + 2.38.0 grpc-google-cloud-bigtable-admin-v2 GRPC library for grpc-google-cloud-bigtable-admin-v2 com.google.cloud google-cloud-bigtable-parent - 2.37.0 + 2.38.0 @@ -18,14 +18,14 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.37.0 + 2.38.0 pom import com.google.cloud google-cloud-bigtable-bom - 2.37.0 + 2.38.0 pom import diff --git a/grpc-google-cloud-bigtable-v2/pom.xml b/grpc-google-cloud-bigtable-v2/pom.xml index 3e89608789..297f0c1c37 100644 --- a/grpc-google-cloud-bigtable-v2/pom.xml +++ b/grpc-google-cloud-bigtable-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigtable-v2 - 2.37.0 + 2.38.0 grpc-google-cloud-bigtable-v2 GRPC library for grpc-google-cloud-bigtable-v2 com.google.cloud google-cloud-bigtable-parent - 2.37.0 + 2.38.0 @@ -18,14 +18,14 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.37.0 + 2.38.0 pom import com.google.cloud google-cloud-bigtable-bom - 2.37.0 + 2.38.0 pom import diff --git a/pom.xml b/pom.xml index ffa93e753e..a94d96edbf 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ google-cloud-bigtable-parent pom - 2.37.0 + 2.38.0 Google Cloud Bigtable Parent https://github.com/googleapis/java-bigtable @@ -153,27 +153,27 @@ com.google.api.grpc proto-google-cloud-bigtable-v2 - 2.37.0 + 2.38.0 com.google.api.grpc proto-google-cloud-bigtable-admin-v2 - 2.37.0 + 2.38.0 com.google.api.grpc grpc-google-cloud-bigtable-v2 - 2.37.0 + 2.38.0 com.google.api.grpc grpc-google-cloud-bigtable-admin-v2 - 2.37.0 + 2.38.0 com.google.cloud google-cloud-bigtable - 2.37.0 + 2.38.0 @@ -347,22 +347,6 @@ - - - - with-shaded - - - !skip-shaded - - - - google-cloud-bigtable-stats - - diff --git a/proto-google-cloud-bigtable-admin-v2/pom.xml b/proto-google-cloud-bigtable-admin-v2/pom.xml index c06610f581..8321a0b6a0 100644 --- a/proto-google-cloud-bigtable-admin-v2/pom.xml +++ b/proto-google-cloud-bigtable-admin-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigtable-admin-v2 - 2.37.0 + 2.38.0 
proto-google-cloud-bigtable-admin-v2 PROTO library for proto-google-cloud-bigtable-admin-v2 com.google.cloud google-cloud-bigtable-parent - 2.37.0 + 2.38.0 @@ -18,14 +18,14 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.37.0 + 2.38.0 pom import com.google.cloud google-cloud-bigtable-bom - 2.37.0 + 2.38.0 pom import diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfile.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfile.java index e9fbfaa569..e96392a2f0 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfile.java +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfile.java @@ -2382,6 +2382,883 @@ public com.google.bigtable.admin.v2.AppProfile.StandardIsolation getDefaultInsta } } + public interface DataBoostIsolationReadOnlyOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+     * The Compute Billing Owner for this Data Boost App Profile.
+     *
+ * + * + * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1; + * + * + * @return Whether the computeBillingOwner field is set. + */ + boolean hasComputeBillingOwner(); + /** + * + * + *
+     * The Compute Billing Owner for this Data Boost App Profile.
+     *
+ * + * + * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1; + * + * + * @return The enum numeric value on the wire for computeBillingOwner. + */ + int getComputeBillingOwnerValue(); + /** + * + * + *
+     * The Compute Billing Owner for this Data Boost App Profile.
+     *
+ * + * + * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1; + * + * + * @return The computeBillingOwner. + */ + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner + getComputeBillingOwner(); + } + /** + * + * + *
+   * Data Boost is a serverless compute capability that lets you run
+   * high-throughput read jobs on your Bigtable data, without impacting the
+   * performance of the clusters that handle your application traffic.
+   * Currently, Data Boost exclusively supports read-only use-cases with
+   * single-cluster routing.
+   *
+   * Data Boost reads are only guaranteed to see the results of writes that
+   * were written at least 30 minutes ago. This means newly written values may
+   * not become visible for up to 30m, and also means that old values may
+   * remain visible for up to 30m after being deleted or overwritten. To
+   * mitigate the staleness of the data, users may either wait 30m, or use
+   * CheckConsistency.
+   * 
+ * + * Protobuf type {@code google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly} + */ + public static final class DataBoostIsolationReadOnly + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) + DataBoostIsolationReadOnlyOrBuilder { + private static final long serialVersionUID = 0L; + // Use DataBoostIsolationReadOnly.newBuilder() to construct. + private DataBoostIsolationReadOnly(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private DataBoostIsolationReadOnly() { + computeBillingOwner_ = 0; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new DataBoostIsolationReadOnly(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.bigtable.admin.v2.InstanceProto + .internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.admin.v2.InstanceProto + .internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.class, + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.Builder.class); + } + + /** + * + * + *
+     * Compute Billing Owner specifies how usage should be accounted for when
+     * using Data Boost. It also configures which Cloud Project is charged for
+     * relevant quota.
+     * 
+ * + * Protobuf enum {@code + * google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner} + */ + public enum ComputeBillingOwner implements com.google.protobuf.ProtocolMessageEnum { + /** + * + * + *
+       * Unspecified value.
+       * 
+ * + * COMPUTE_BILLING_OWNER_UNSPECIFIED = 0; + */ + COMPUTE_BILLING_OWNER_UNSPECIFIED(0), + /** + * + * + *
+       * The host Cloud Project containing the targeted Bigtable Instance /
+       * Table pays for compute.
+       * 
+ * + * HOST_PAYS = 1; + */ + HOST_PAYS(1), + UNRECOGNIZED(-1), + ; + + /** + * + * + *
+       * Unspecified value.
+       * 
+ * + * COMPUTE_BILLING_OWNER_UNSPECIFIED = 0; + */ + public static final int COMPUTE_BILLING_OWNER_UNSPECIFIED_VALUE = 0; + /** + * + * + *
+       * The host Cloud Project containing the targeted Bigtable Instance /
+       * Table pays for compute.
+       * 
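// Example (illustrative sketch): the generated accessors expose this field
// both as a raw wire number and as the enum; numbers unknown to this client
// version map to UNRECOGNIZED.
com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly isolation =
    com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.newBuilder()
        .setComputeBillingOwnerValue(1) // 1 is HOST_PAYS on the wire
        .build();
int wireValue = isolation.getComputeBillingOwnerValue(); // 1
com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner owner =
    isolation.getComputeBillingOwner(); // HOST_PAYS; UNRECOGNIZED for unknown numbers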
+ * + * HOST_PAYS = 1; + */ + public static final int HOST_PAYS_VALUE = 1; + + public final int getNumber() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalArgumentException( + "Can't get the number of an unknown enum value."); + } + return value; + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static ComputeBillingOwner valueOf(int value) { + return forNumber(value); + } + + /** + * @param value The numeric wire value of the corresponding enum entry. + * @return The enum associated with the given numeric wire value. + */ + public static ComputeBillingOwner forNumber(int value) { + switch (value) { + case 0: + return COMPUTE_BILLING_OWNER_UNSPECIFIED; + case 1: + return HOST_PAYS; + default: + return null; + } + } + + public static com.google.protobuf.Internal.EnumLiteMap + internalGetValueMap() { + return internalValueMap; + } + + private static final com.google.protobuf.Internal.EnumLiteMap + internalValueMap = + new com.google.protobuf.Internal.EnumLiteMap() { + public ComputeBillingOwner findValueByNumber(int number) { + return ComputeBillingOwner.forNumber(number); + } + }; + + public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { + if (this == UNRECOGNIZED) { + throw new java.lang.IllegalStateException( + "Can't get the descriptor of an unrecognized enum value."); + } + return getDescriptor().getValues().get(ordinal()); + } + + public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { + return getDescriptor(); + } + + public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { + return com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.getDescriptor() + .getEnumTypes() + .get(0); + } + + private static final ComputeBillingOwner[] VALUES = values(); + + public static ComputeBillingOwner valueOf( + com.google.protobuf.Descriptors.EnumValueDescriptor desc) { + if (desc.getType() != getDescriptor()) { + throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); + } + if (desc.getIndex() == -1) { + return UNRECOGNIZED; + } + return VALUES[desc.getIndex()]; + } + + private final int value; + + private ComputeBillingOwner(int value) { + this.value = value; + } + + // @@protoc_insertion_point(enum_scope:google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner) + } + + private int bitField0_; + public static final int COMPUTE_BILLING_OWNER_FIELD_NUMBER = 1; + private int computeBillingOwner_ = 0; + /** + * + * + *
+     * The Compute Billing Owner for this Data Boost App Profile.
+     * 
+ * + * + * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1; + * + * + * @return Whether the computeBillingOwner field is set. + */ + @java.lang.Override + public boolean hasComputeBillingOwner() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
+     * The Compute Billing Owner for this Data Boost App Profile.
+     * 
+ * + * + * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1; + * + * + * @return The enum numeric value on the wire for computeBillingOwner. + */ + @java.lang.Override + public int getComputeBillingOwnerValue() { + return computeBillingOwner_; + } + /** + * + * + *
+     * The Compute Billing Owner for this Data Boost App Profile.
+     * 
+ * + * + * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1; + * + * + * @return The computeBillingOwner. + */ + @java.lang.Override + public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner + getComputeBillingOwner() { + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner + result = + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner + .forNumber(computeBillingOwner_); + return result == null + ? com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner + .UNRECOGNIZED + : result; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeEnum(1, computeBillingOwner_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeEnumSize(1, computeBillingOwner_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly)) { + return super.equals(obj); + } + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly other = + (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) obj; + + if (hasComputeBillingOwner() != other.hasComputeBillingOwner()) return false; + if (hasComputeBillingOwner()) { + if (computeBillingOwner_ != other.computeBillingOwner_) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + if (hasComputeBillingOwner()) { + hash = (37 * hash) + COMPUTE_BILLING_OWNER_FIELD_NUMBER; + hash = (53 * hash) + computeBillingOwner_; + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly 
parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType( + com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+     * Data Boost is a serverless compute capability that lets you run
+     * high-throughput read jobs on your Bigtable data, without impacting the
+     * performance of the clusters that handle your application traffic.
+     * Currently, Data Boost exclusively supports read-only use cases with
+     * single-cluster routing.
+     *
+     * Data Boost reads are only guaranteed to see the results of writes that
+     * were written at least 30 minutes ago. This means newly written values may
+     * not become visible for up to 30 minutes, and old values may remain
+     * visible for up to 30 minutes after being deleted or overwritten. To
+     * mitigate this staleness, users may either wait 30 minutes or use
+     * CheckConsistency.
+     * 
+ * + * Protobuf type {@code google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly} + */ + public static final class Builder + extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnlyOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.bigtable.admin.v2.InstanceProto + .internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.admin.v2.InstanceProto + .internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.class, + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.Builder.class); + } + + // Construct using + // com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + computeBillingOwner_ = 0; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.bigtable.admin.v2.InstanceProto + .internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_descriptor; + } + + @java.lang.Override + public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + getDefaultInstanceForType() { + return com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly build() { + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly buildPartial() { + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly result = + new com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.computeBillingOwner_ = computeBillingOwner_; + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + 
@java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, + int index, + java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) { + return mergeFrom( + (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly other) { + if (other + == com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + .getDefaultInstance()) return this; + if (other.hasComputeBillingOwner()) { + setComputeBillingOwner(other.getComputeBillingOwner()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 8: + { + computeBillingOwner_ = input.readEnum(); + bitField0_ |= 0x00000001; + break; + } // case 8 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private int computeBillingOwner_ = 0; + /** + * + * + *
+       * The Compute Billing Owner for this Data Boost App Profile.
+       * 
+ * + * + * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1; + * + * + * @return Whether the computeBillingOwner field is set. + */ + @java.lang.Override + public boolean hasComputeBillingOwner() { + return ((bitField0_ & 0x00000001) != 0); + } + /** + * + * + *
+       * The Compute Billing Owner for this Data Boost App Profile.
+       * 
+ * + * + * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1; + * + * + * @return The enum numeric value on the wire for computeBillingOwner. + */ + @java.lang.Override + public int getComputeBillingOwnerValue() { + return computeBillingOwner_; + } + /** + * + * + *
+       * The Compute Billing Owner for this Data Boost App Profile.
+       * 
+ * + * + * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1; + * + * + * @param value The enum numeric value on the wire for computeBillingOwner to set. + * @return This builder for chaining. + */ + public Builder setComputeBillingOwnerValue(int value) { + computeBillingOwner_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + /** + * + * + *
+       * The Compute Billing Owner for this Data Boost App Profile.
+       * 
+ * + * + * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1; + * + * + * @return The computeBillingOwner. + */ + @java.lang.Override + public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner + getComputeBillingOwner() { + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner + result = + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + .ComputeBillingOwner.forNumber(computeBillingOwner_); + return result == null + ? com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner + .UNRECOGNIZED + : result; + } + /** + * + * + *
+       * The Compute Billing Owner for this Data Boost App Profile.
+       * 
+ * + * + * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1; + * + * + * @param value The computeBillingOwner to set. + * @return This builder for chaining. + */ + public Builder setComputeBillingOwner( + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner + value) { + if (value == null) { + throw new NullPointerException(); + } + bitField0_ |= 0x00000001; + computeBillingOwner_ = value.getNumber(); + onChanged(); + return this; + } + /** + * + * + *
+       * The Compute Billing Owner for this Data Boost App Profile.
+       * 
+ * + * + * optional .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner compute_billing_owner = 1; + * + * + * @return This builder for chaining. + */ + public Builder clearComputeBillingOwner() { + bitField0_ = (bitField0_ & ~0x00000001); + computeBillingOwner_ = 0; + onChanged(); + return this; + } + + @java.lang.Override + public final Builder setUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) + private static final com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly(); + } + + public static com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DataBoostIsolationReadOnly parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException() + .setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } + } + private int routingPolicyCase_ = 0; @SuppressWarnings("serial") @@ -2443,6 +3320,7 @@ public enum IsolationCase @java.lang.Deprecated PRIORITY(7), STANDARD_ISOLATION(11), + DATA_BOOST_ISOLATION_READ_ONLY(10), ISOLATION_NOT_SET(0); private final int value; @@ -2465,6 +3343,8 @@ public static IsolationCase forNumber(int value) { return PRIORITY; case 11: return STANDARD_ISOLATION; + case 10: + return DATA_BOOST_ISOLATION_READ_ONLY; case 0: return ISOLATION_NOT_SET; default: @@ -2778,7 +3658,7 @@ public com.google.bigtable.admin.v2.AppProfile.SingleClusterRouting getSingleClu * .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true]; * * @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See - * google/bigtable/admin/v2/instance.proto;l=332 + * google/bigtable/admin/v2/instance.proto;l=361 * @return Whether the priority field is set. 
*/ @java.lang.Deprecated @@ -2798,7 +3678,7 @@ public boolean hasPriority() { * .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true]; * * @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See - * google/bigtable/admin/v2/instance.proto;l=332 + * google/bigtable/admin/v2/instance.proto;l=361 * @return The enum numeric value on the wire for priority. */ @java.lang.Deprecated @@ -2821,7 +3701,7 @@ public int getPriorityValue() { * .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true]; * * @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See - * google/bigtable/admin/v2/instance.proto;l=332 + * google/bigtable/admin/v2/instance.proto;l=361 * @return The priority. */ @java.lang.Deprecated @@ -2892,6 +3772,68 @@ public com.google.bigtable.admin.v2.AppProfile.StandardIsolation getStandardIsol return com.google.bigtable.admin.v2.AppProfile.StandardIsolation.getDefaultInstance(); } + public static final int DATA_BOOST_ISOLATION_READ_ONLY_FIELD_NUMBER = 10; + /** + * + * + *
+   * Specifies that this app profile is intended for read-only usage via the
+   * Data Boost feature.
+   * 
+ * + * + * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; + * + * + * @return Whether the dataBoostIsolationReadOnly field is set. + */ + @java.lang.Override + public boolean hasDataBoostIsolationReadOnly() { + return isolationCase_ == 10; + } + /** + * + * + *
+   * Specifies that this app profile is intended for read-only usage via the
+   * Data Boost feature.
+   * 
+ * + * + * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; + * + * + * @return The dataBoostIsolationReadOnly. + */ + @java.lang.Override + public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + getDataBoostIsolationReadOnly() { + if (isolationCase_ == 10) { + return (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) isolation_; + } + return com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.getDefaultInstance(); + } + /** + * + * + *
+   * Specifies that this app profile is intended for read-only usage via the
+   * Data Boost feature.
+   * 
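// Example (illustrative sketch): data_boost_isolation_read_only is a new case
// of the existing "isolation" oneof, so callers can branch on the oneof case
// before using the accessors added in this diff. The AppProfile is assumed to
// have been obtained elsewhere (for example via the admin API).
static void describeIsolation(com.google.bigtable.admin.v2.AppProfile profile) {
  if (profile.getIsolationCase()
      == com.google.bigtable.admin.v2.AppProfile.IsolationCase.DATA_BOOST_ISOLATION_READ_ONLY) {
    com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.ComputeBillingOwner owner =
        profile.getDataBoostIsolationReadOnly().getComputeBillingOwner();
    System.out.println("Data Boost app profile, compute billing owner: " + owner);
  }
}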
+ * + * + * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; + * + */ + @java.lang.Override + public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnlyOrBuilder + getDataBoostIsolationReadOnlyOrBuilder() { + if (isolationCase_ == 10) { + return (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) isolation_; + } + return com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -2926,6 +3868,10 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (isolationCase_ == 7) { output.writeEnum(7, ((java.lang.Integer) isolation_)); } + if (isolationCase_ == 10) { + output.writeMessage( + 10, (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) isolation_); + } if (isolationCase_ == 11) { output.writeMessage( 11, (com.google.bigtable.admin.v2.AppProfile.StandardIsolation) isolation_); @@ -2964,6 +3910,11 @@ public int getSerializedSize() { com.google.protobuf.CodedOutputStream.computeEnumSize( 7, ((java.lang.Integer) isolation_)); } + if (isolationCase_ == 10) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 10, (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) isolation_); + } if (isolationCase_ == 11) { size += com.google.protobuf.CodedOutputStream.computeMessageSize( @@ -3007,6 +3958,10 @@ public boolean equals(final java.lang.Object obj) { case 11: if (!getStandardIsolation().equals(other.getStandardIsolation())) return false; break; + case 10: + if (!getDataBoostIsolationReadOnly().equals(other.getDataBoostIsolationReadOnly())) + return false; + break; case 0: default: } @@ -3048,6 +4003,10 @@ public int hashCode() { hash = (37 * hash) + STANDARD_ISOLATION_FIELD_NUMBER; hash = (53 * hash) + getStandardIsolation().hashCode(); break; + case 10: + hash = (37 * hash) + DATA_BOOST_ISOLATION_READ_ONLY_FIELD_NUMBER; + hash = (53 * hash) + getDataBoostIsolationReadOnly().hashCode(); + break; case 0: default: } @@ -3203,6 +4162,9 @@ public Builder clear() { if (standardIsolationBuilder_ != null) { standardIsolationBuilder_.clear(); } + if (dataBoostIsolationReadOnlyBuilder_ != null) { + dataBoostIsolationReadOnlyBuilder_.clear(); + } routingPolicyCase_ = 0; routingPolicy_ = null; isolationCase_ = 0; @@ -3269,6 +4231,9 @@ private void buildPartialOneofs(com.google.bigtable.admin.v2.AppProfile result) if (isolationCase_ == 11 && standardIsolationBuilder_ != null) { result.isolation_ = standardIsolationBuilder_.build(); } + if (isolationCase_ == 10 && dataBoostIsolationReadOnlyBuilder_ != null) { + result.isolation_ = dataBoostIsolationReadOnlyBuilder_.build(); + } } @java.lang.Override @@ -3358,6 +4323,11 @@ public Builder mergeFrom(com.google.bigtable.admin.v2.AppProfile other) { mergeStandardIsolation(other.getStandardIsolation()); break; } + case DATA_BOOST_ISOLATION_READ_ONLY: + { + mergeDataBoostIsolationReadOnly(other.getDataBoostIsolationReadOnly()); + break; + } case ISOLATION_NOT_SET: { break; @@ -3428,6 +4398,13 @@ public Builder mergeFrom( isolation_ = rawValue; break; } // case 56 + case 82: + { + input.readMessage( + getDataBoostIsolationReadOnlyFieldBuilder().getBuilder(), extensionRegistry); + isolationCase_ = 10; + break; + } // case 82 case 90: { input.readMessage( @@ -4318,7 +5295,7 @@ public Builder clearSingleClusterRouting() { * .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true]; * 
* @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See - * google/bigtable/admin/v2/instance.proto;l=332 + * google/bigtable/admin/v2/instance.proto;l=361 * @return Whether the priority field is set. */ @java.lang.Override @@ -4339,7 +5316,7 @@ public boolean hasPriority() { * .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true]; * * @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See - * google/bigtable/admin/v2/instance.proto;l=332 + * google/bigtable/admin/v2/instance.proto;l=361 * @return The enum numeric value on the wire for priority. */ @java.lang.Override @@ -4363,7 +5340,7 @@ public int getPriorityValue() { * .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true]; * * @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See - * google/bigtable/admin/v2/instance.proto;l=332 + * google/bigtable/admin/v2/instance.proto;l=361 * @param value The enum numeric value on the wire for priority to set. * @return This builder for chaining. */ @@ -4387,7 +5364,7 @@ public Builder setPriorityValue(int value) { * .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true]; * * @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See - * google/bigtable/admin/v2/instance.proto;l=332 + * google/bigtable/admin/v2/instance.proto;l=361 * @return The priority. */ @java.lang.Override @@ -4416,7 +5393,7 @@ public com.google.bigtable.admin.v2.AppProfile.Priority getPriority() { * .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true]; * * @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See - * google/bigtable/admin/v2/instance.proto;l=332 + * google/bigtable/admin/v2/instance.proto;l=361 * @param value The priority to set. * @return This builder for chaining. */ @@ -4443,7 +5420,7 @@ public Builder setPriority(com.google.bigtable.admin.v2.AppProfile.Priority valu * .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true]; * * @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See - * google/bigtable/admin/v2/instance.proto;l=332 + * google/bigtable/admin/v2/instance.proto;l=361 * @return This builder for chaining. */ @java.lang.Deprecated @@ -4680,6 +5657,256 @@ public Builder clearStandardIsolation() { return standardIsolationBuilder_; } + private com.google.protobuf.SingleFieldBuilderV3< + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly, + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.Builder, + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnlyOrBuilder> + dataBoostIsolationReadOnlyBuilder_; + /** + * + * + *
+     * Specifies that this app profile is intended for read-only usage via the
+     * Data Boost feature.
+     * 
+ * + * + * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; + * + * + * @return Whether the dataBoostIsolationReadOnly field is set. + */ + @java.lang.Override + public boolean hasDataBoostIsolationReadOnly() { + return isolationCase_ == 10; + } + /** + * + * + *
+     * Specifies that this app profile is intended for read-only usage via the
+     * Data Boost feature.
+     * 
+ * + * + * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; + * + * + * @return The dataBoostIsolationReadOnly. + */ + @java.lang.Override + public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + getDataBoostIsolationReadOnly() { + if (dataBoostIsolationReadOnlyBuilder_ == null) { + if (isolationCase_ == 10) { + return (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) isolation_; + } + return com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + .getDefaultInstance(); + } else { + if (isolationCase_ == 10) { + return dataBoostIsolationReadOnlyBuilder_.getMessage(); + } + return com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + .getDefaultInstance(); + } + } + /** + * + * + *
+     * Specifies that this app profile is intended for read-only usage via the
+     * Data Boost feature.
+     * 
+ * + * + * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; + * + */ + public Builder setDataBoostIsolationReadOnly( + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly value) { + if (dataBoostIsolationReadOnlyBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + isolation_ = value; + onChanged(); + } else { + dataBoostIsolationReadOnlyBuilder_.setMessage(value); + } + isolationCase_ = 10; + return this; + } + /** + * + * + *
+     * Specifies that this app profile is intended for read-only usage via the
+     * Data Boost feature.
+     * 
+ * + * + * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; + * + */ + public Builder setDataBoostIsolationReadOnly( + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.Builder + builderForValue) { + if (dataBoostIsolationReadOnlyBuilder_ == null) { + isolation_ = builderForValue.build(); + onChanged(); + } else { + dataBoostIsolationReadOnlyBuilder_.setMessage(builderForValue.build()); + } + isolationCase_ = 10; + return this; + } + /** + * + * + *
+     * Specifies that this app profile is intended for read-only usage via the
+     * Data Boost feature.
+     * 
+ * + * + * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; + * + */ + public Builder mergeDataBoostIsolationReadOnly( + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly value) { + if (dataBoostIsolationReadOnlyBuilder_ == null) { + if (isolationCase_ == 10 + && isolation_ + != com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + .getDefaultInstance()) { + isolation_ = + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.newBuilder( + (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) + isolation_) + .mergeFrom(value) + .buildPartial(); + } else { + isolation_ = value; + } + onChanged(); + } else { + if (isolationCase_ == 10) { + dataBoostIsolationReadOnlyBuilder_.mergeFrom(value); + } else { + dataBoostIsolationReadOnlyBuilder_.setMessage(value); + } + } + isolationCase_ = 10; + return this; + } + /** + * + * + *
+     * Specifies that this app profile is intended for read-only usage via the
+     * Data Boost feature.
+     * 
+ * + * + * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; + * + */ + public Builder clearDataBoostIsolationReadOnly() { + if (dataBoostIsolationReadOnlyBuilder_ == null) { + if (isolationCase_ == 10) { + isolationCase_ = 0; + isolation_ = null; + onChanged(); + } + } else { + if (isolationCase_ == 10) { + isolationCase_ = 0; + isolation_ = null; + } + dataBoostIsolationReadOnlyBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Specifies that this app profile is intended for read-only usage via the
+     * Data Boost feature.
+     * 
+ * + * + * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; + * + */ + public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.Builder + getDataBoostIsolationReadOnlyBuilder() { + return getDataBoostIsolationReadOnlyFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Specifies that this app profile is intended for read-only usage via the
+     * Data Boost feature.
+     * 
+ * + * + * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; + * + */ + @java.lang.Override + public com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnlyOrBuilder + getDataBoostIsolationReadOnlyOrBuilder() { + if ((isolationCase_ == 10) && (dataBoostIsolationReadOnlyBuilder_ != null)) { + return dataBoostIsolationReadOnlyBuilder_.getMessageOrBuilder(); + } else { + if (isolationCase_ == 10) { + return (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) isolation_; + } + return com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + .getDefaultInstance(); + } + } + /** + * + * + *
+     * Specifies that this app profile is intended for read-only usage via the
+     * Data Boost feature.
+     * 
+ * + * + * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly, + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.Builder, + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnlyOrBuilder> + getDataBoostIsolationReadOnlyFieldBuilder() { + if (dataBoostIsolationReadOnlyBuilder_ == null) { + if (!(isolationCase_ == 10)) { + isolation_ = + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + .getDefaultInstance(); + } + dataBoostIsolationReadOnlyBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly, + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly.Builder, + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnlyOrBuilder>( + (com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly) isolation_, + getParentForChildren(), + isClean()); + isolation_ = null; + } + isolationCase_ = 10; + onChanged(); + return dataBoostIsolationReadOnlyBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfileOrBuilder.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfileOrBuilder.java index 262136da9e..e5fa6a2fa5 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfileOrBuilder.java +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/AppProfileOrBuilder.java @@ -209,7 +209,7 @@ public interface AppProfileOrBuilder * .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true]; * * @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See - * google/bigtable/admin/v2/instance.proto;l=332 + * google/bigtable/admin/v2/instance.proto;l=361 * @return Whether the priority field is set. */ @java.lang.Deprecated @@ -227,7 +227,7 @@ public interface AppProfileOrBuilder * .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true]; * * @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See - * google/bigtable/admin/v2/instance.proto;l=332 + * google/bigtable/admin/v2/instance.proto;l=361 * @return The enum numeric value on the wire for priority. */ @java.lang.Deprecated @@ -245,7 +245,7 @@ public interface AppProfileOrBuilder * .google.bigtable.admin.v2.AppProfile.Priority priority = 7 [deprecated = true]; * * @deprecated google.bigtable.admin.v2.AppProfile.priority is deprecated. See - * google/bigtable/admin/v2/instance.proto;l=332 + * google/bigtable/admin/v2/instance.proto;l=361 * @return The priority. */ @java.lang.Deprecated @@ -290,6 +290,52 @@ public interface AppProfileOrBuilder com.google.bigtable.admin.v2.AppProfile.StandardIsolationOrBuilder getStandardIsolationOrBuilder(); + /** + * + * + *
+   * Specifies that this app profile is intended for read-only usage via the
+   * Data Boost feature.
+   * 
+ * + * + * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; + * + * + * @return Whether the dataBoostIsolationReadOnly field is set. + */ + boolean hasDataBoostIsolationReadOnly(); + /** + * + * + *
+   * Specifies that this app profile is intended for read-only usage via the
+   * Data Boost feature.
+   * 
+ * + * + * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; + * + * + * @return The dataBoostIsolationReadOnly. + */ + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly + getDataBoostIsolationReadOnly(); + /** + * + * + *
+   * Specifies that this app profile is intended for read-only usage via the
+   * Data Boost feature.
+   * 
+ * + * + * .google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; + * + */ + com.google.bigtable.admin.v2.AppProfile.DataBoostIsolationReadOnlyOrBuilder + getDataBoostIsolationReadOnlyOrBuilder(); + com.google.bigtable.admin.v2.AppProfile.RoutingPolicyCase getRoutingPolicyCase(); com.google.bigtable.admin.v2.AppProfile.IsolationCase getIsolationCase(); diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BigtableTableAdminProto.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BigtableTableAdminProto.java index dc4d0a0a0b..a9290a9ae7 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BigtableTableAdminProto.java +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/BigtableTableAdminProto.java @@ -108,6 +108,14 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_bigtable_admin_v2_CheckConsistencyRequest_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_bigtable_admin_v2_CheckConsistencyRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_admin_v2_StandardReadRemoteWrites_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_bigtable_admin_v2_StandardReadRemoteWrites_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_admin_v2_DataBoostReadLocalWrites_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_bigtable_admin_v2_DataBoostReadLocalWrites_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_bigtable_admin_v2_CheckConsistencyResponse_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -297,277 +305,283 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "nerateConsistencyTokenRequest\0228\n\004name\030\001 " + "\001(\tB*\340A\002\372A$\n\"bigtableadmin.googleapis.co" + "m/Table\"=\n GenerateConsistencyTokenRespo" - + "nse\022\031\n\021consistency_token\030\001 \001(\t\"s\n\027CheckC" - + "onsistencyRequest\0228\n\004name\030\001 \001(\tB*\340A\002\372A$\n" - + "\"bigtableadmin.googleapis.com/Table\022\036\n\021c" - + "onsistency_token\030\002 \001(\tB\003\340A\002\".\n\030CheckCons" - + "istencyResponse\022\022\n\nconsistent\030\001 \001(\010\"\346\001\n\024" - + "SnapshotTableRequest\0228\n\004name\030\001 \001(\tB*\340A\002\372" - + "A$\n\"bigtableadmin.googleapis.com/Table\022=" - + "\n\007cluster\030\002 \001(\tB,\340A\002\372A&\n$bigtableadmin.g" - + "oogleapis.com/Cluster\022\030\n\013snapshot_id\030\003 \001" - + "(\tB\003\340A\002\022&\n\003ttl\030\004 \001(\0132\031.google.protobuf.D" - + "uration\022\023\n\013description\030\005 \001(\t\"Q\n\022GetSnaps" - + "hotRequest\022;\n\004name\030\001 \001(\tB-\340A\002\372A\'\n%bigtab" - + "leadmin.googleapis.com/Snapshot\"{\n\024ListS" - + "napshotsRequest\022<\n\006parent\030\001 \001(\tB,\340A\002\372A&\n" - + "$bigtableadmin.googleapis.com/Cluster\022\021\n" - + "\tpage_size\030\002 \001(\005\022\022\n\npage_token\030\003 \001(\t\"g\n\025" - + "ListSnapshotsResponse\0225\n\tsnapshots\030\001 \003(\013" - + "2\".google.bigtable.admin.v2.Snapshot\022\027\n\017" - + 
"next_page_token\030\002 \001(\t\"T\n\025DeleteSnapshotR" - + "equest\022;\n\004name\030\001 \001(\tB-\340A\002\372A\'\n%bigtablead" - + "min.googleapis.com/Snapshot\"\304\001\n\025Snapshot" - + "TableMetadata\022H\n\020original_request\030\001 \001(\0132" - + "..google.bigtable.admin.v2.SnapshotTable" - + "Request\0220\n\014request_time\030\002 \001(\0132\032.google.p" - + "rotobuf.Timestamp\022/\n\013finish_time\030\003 \001(\0132\032" - + ".google.protobuf.Timestamp\"\330\001\n\037CreateTab" - + "leFromSnapshotMetadata\022R\n\020original_reque" - + "st\030\001 \001(\01328.google.bigtable.admin.v2.Crea" - + "teTableFromSnapshotRequest\0220\n\014request_ti" - + "me\030\002 \001(\0132\032.google.protobuf.Timestamp\022/\n\013" - + "finish_time\030\003 \001(\0132\032.google.protobuf.Time" - + "stamp\"\242\001\n\023CreateBackupRequest\022<\n\006parent\030" - + "\001 \001(\tB,\340A\002\372A&\n$bigtableadmin.googleapis." - + "com/Cluster\022\026\n\tbackup_id\030\002 \001(\tB\003\340A\002\0225\n\006b" - + "ackup\030\003 \001(\0132 .google.bigtable.admin.v2.B" - + "ackupB\003\340A\002\"\230\001\n\024CreateBackupMetadata\022\014\n\004n" - + "ame\030\001 \001(\t\022\024\n\014source_table\030\002 \001(\t\022.\n\nstart" - + "_time\030\003 \001(\0132\032.google.protobuf.Timestamp\022" - + ",\n\010end_time\030\004 \001(\0132\032.google.protobuf.Time" - + "stamp\"\202\001\n\023UpdateBackupRequest\0225\n\006backup\030" - + "\001 \001(\0132 .google.bigtable.admin.v2.BackupB" - + "\003\340A\002\0224\n\013update_mask\030\002 \001(\0132\032.google.proto" - + "buf.FieldMaskB\003\340A\002\"M\n\020GetBackupRequest\0229" - + "\n\004name\030\001 \001(\tB+\340A\002\372A%\n#bigtableadmin.goog" - + "leapis.com/Backup\"P\n\023DeleteBackupRequest" - + "\0229\n\004name\030\001 \001(\tB+\340A\002\372A%\n#bigtableadmin.go" - + "ogleapis.com/Backup\"\233\001\n\022ListBackupsReque" - + "st\022<\n\006parent\030\001 \001(\tB,\340A\002\372A&\n$bigtableadmi" - + "n.googleapis.com/Cluster\022\016\n\006filter\030\002 \001(\t" - + "\022\020\n\010order_by\030\003 \001(\t\022\021\n\tpage_size\030\004 \001(\005\022\022\n" - + "\npage_token\030\005 \001(\t\"a\n\023ListBackupsResponse" - + "\0221\n\007backups\030\001 \003(\0132 .google.bigtable.admi" - + "n.v2.Backup\022\027\n\017next_page_token\030\002 \001(\t\"\343\001\n" - + "\021CopyBackupRequest\022<\n\006parent\030\001 \001(\tB,\340A\002\372" - + "A&\n$bigtableadmin.googleapis.com/Cluster" - + "\022\026\n\tbackup_id\030\002 \001(\tB\003\340A\002\022B\n\rsource_backu" - + "p\030\003 \001(\tB+\340A\002\372A%\n#bigtableadmin.googleapi" - + "s.com/Backup\0224\n\013expire_time\030\004 \001(\0132\032.goog" - + "le.protobuf.TimestampB\003\340A\002\"\315\001\n\022CopyBacku" - + "pMetadata\0226\n\004name\030\001 \001(\tB(\372A%\n#bigtablead" - + "min.googleapis.com/Backup\022@\n\022source_back" - + "up_info\030\002 \001(\0132$.google.bigtable.admin.v2" - + ".BackupInfo\022=\n\010progress\030\003 \001(\0132+.google.b" - + "igtable.admin.v2.OperationProgress\"\313\001\n\033C" - + "reateAuthorizedViewRequest\022C\n\006parent\030\001 \001" - + "(\tB3\340A\002\372A-\022+bigtableadmin.googleapis.com" - + "/AuthorizedView\022\037\n\022authorized_view_id\030\002 " - + "\001(\tB\003\340A\002\022F\n\017authorized_view\030\003 \001(\0132(.goog" - + "le.bigtable.admin.v2.AuthorizedViewB\003\340A\002" - + "\"\322\001\n\034CreateAuthorizedViewMetadata\022O\n\020ori" - + "ginal_request\030\001 \001(\01325.google.bigtable.ad" - + "min.v2.CreateAuthorizedViewRequest\0220\n\014re" + + 
"nse\022\031\n\021consistency_token\030\001 \001(\t\"\262\002\n\027Check" + + "ConsistencyRequest\0228\n\004name\030\001 \001(\tB*\340A\002\372A$" + + "\n\"bigtableadmin.googleapis.com/Table\022\036\n\021" + + "consistency_token\030\002 \001(\tB\003\340A\002\022Y\n\033standard" + + "_read_remote_writes\030\003 \001(\01322.google.bigta" + + "ble.admin.v2.StandardReadRemoteWritesH\000\022" + + "Z\n\034data_boost_read_local_writes\030\004 \001(\01322." + + "google.bigtable.admin.v2.DataBoostReadLo" + + "calWritesH\000B\006\n\004mode\"\032\n\030StandardReadRemot" + + "eWrites\"\032\n\030DataBoostReadLocalWrites\".\n\030C" + + "heckConsistencyResponse\022\022\n\nconsistent\030\001 " + + "\001(\010\"\346\001\n\024SnapshotTableRequest\0228\n\004name\030\001 \001" + + "(\tB*\340A\002\372A$\n\"bigtableadmin.googleapis.com" + + "/Table\022=\n\007cluster\030\002 \001(\tB,\340A\002\372A&\n$bigtabl" + + "eadmin.googleapis.com/Cluster\022\030\n\013snapsho" + + "t_id\030\003 \001(\tB\003\340A\002\022&\n\003ttl\030\004 \001(\0132\031.google.pr" + + "otobuf.Duration\022\023\n\013description\030\005 \001(\t\"Q\n\022" + + "GetSnapshotRequest\022;\n\004name\030\001 \001(\tB-\340A\002\372A\'" + + "\n%bigtableadmin.googleapis.com/Snapshot\"" + + "{\n\024ListSnapshotsRequest\022<\n\006parent\030\001 \001(\tB" + + ",\340A\002\372A&\n$bigtableadmin.googleapis.com/Cl" + + "uster\022\021\n\tpage_size\030\002 \001(\005\022\022\n\npage_token\030\003" + + " \001(\t\"g\n\025ListSnapshotsResponse\0225\n\tsnapsho" + + "ts\030\001 \003(\0132\".google.bigtable.admin.v2.Snap" + + "shot\022\027\n\017next_page_token\030\002 \001(\t\"T\n\025DeleteS" + + "napshotRequest\022;\n\004name\030\001 \001(\tB-\340A\002\372A\'\n%bi" + + "gtableadmin.googleapis.com/Snapshot\"\304\001\n\025" + + "SnapshotTableMetadata\022H\n\020original_reques" + + "t\030\001 \001(\0132..google.bigtable.admin.v2.Snaps" + + "hotTableRequest\0220\n\014request_time\030\002 \001(\0132\032." + + "google.protobuf.Timestamp\022/\n\013finish_time" + + "\030\003 \001(\0132\032.google.protobuf.Timestamp\"\330\001\n\037C" + + "reateTableFromSnapshotMetadata\022R\n\020origin" + + "al_request\030\001 \001(\01328.google.bigtable.admin" + + ".v2.CreateTableFromSnapshotRequest\0220\n\014re" + "quest_time\030\002 \001(\0132\032.google.protobuf.Times" + "tamp\022/\n\013finish_time\030\003 \001(\0132\032.google.proto" - + "buf.Timestamp\"\334\001\n\032ListAuthorizedViewsReq" - + "uest\022C\n\006parent\030\001 \001(\tB3\340A\002\372A-\022+bigtablead" - + "min.googleapis.com/AuthorizedView\022\026\n\tpag" - + "e_size\030\002 \001(\005B\003\340A\001\022\027\n\npage_token\030\003 \001(\tB\003\340" - + "A\001\022H\n\004view\030\004 \001(\01625.google.bigtable.admin" - + ".v2.AuthorizedView.ResponseViewB\003\340A\001\"z\n\033" - + "ListAuthorizedViewsResponse\022B\n\020authorize" - + "d_views\030\001 \003(\0132(.google.bigtable.admin.v2" - + ".AuthorizedView\022\027\n\017next_page_token\030\002 \001(\t" - + "\"\247\001\n\030GetAuthorizedViewRequest\022A\n\004name\030\001 " - + "\001(\tB3\340A\002\372A-\n+bigtableadmin.googleapis.co" - + "m/AuthorizedView\022H\n\004view\030\002 \001(\01625.google." 
- + "bigtable.admin.v2.AuthorizedView.Respons" - + "eViewB\003\340A\001\"\271\001\n\033UpdateAuthorizedViewReque" - + "st\022F\n\017authorized_view\030\001 \001(\0132(.google.big" - + "table.admin.v2.AuthorizedViewB\003\340A\002\0224\n\013up" - + "date_mask\030\002 \001(\0132\032.google.protobuf.FieldM" - + "askB\003\340A\001\022\034\n\017ignore_warnings\030\003 \001(\010B\003\340A\001\"\322" - + "\001\n\034UpdateAuthorizedViewMetadata\022O\n\020origi" - + "nal_request\030\001 \001(\01325.google.bigtable.admi" - + "n.v2.UpdateAuthorizedViewRequest\0220\n\014requ" - + "est_time\030\002 \001(\0132\032.google.protobuf.Timesta" - + "mp\022/\n\013finish_time\030\003 \001(\0132\032.google.protobu" - + "f.Timestamp\"s\n\033DeleteAuthorizedViewReque" - + "st\022A\n\004name\030\001 \001(\tB3\340A\002\372A-\n+bigtableadmin." - + "googleapis.com/AuthorizedView\022\021\n\004etag\030\002 " - + "\001(\tB\003\340A\0012\2663\n\022BigtableTableAdmin\022\253\001\n\013Crea" - + "teTable\022,.google.bigtable.admin.v2.Creat" - + "eTableRequest\032\037.google.bigtable.admin.v2" - + ".Table\"M\332A\025parent,table_id,table\202\323\344\223\002/\"*" - + "/v2/{parent=projects/*/instances/*}/tabl" - + "es:\001*\022\212\002\n\027CreateTableFromSnapshot\0228.goog" - + "le.bigtable.admin.v2.CreateTableFromSnap" - + "shotRequest\032\035.google.longrunning.Operati" - + "on\"\225\001\312A(\n\005Table\022\037CreateTableFromSnapshot" - + "Metadata\332A\037parent,table_id,source_snapsh" - + "ot\202\323\344\223\002B\"=/v2/{parent=projects/*/instanc" - + "es/*}/tables:createFromSnapshot:\001*\022\244\001\n\nL" - + "istTables\022+.google.bigtable.admin.v2.Lis" - + "tTablesRequest\032,.google.bigtable.admin.v" - + "2.ListTablesResponse\";\332A\006parent\202\323\344\223\002,\022*/" - + "v2/{parent=projects/*/instances/*}/table" - + "s\022\221\001\n\010GetTable\022).google.bigtable.admin.v" - + "2.GetTableRequest\032\037.google.bigtable.admi" - + "n.v2.Table\"9\332A\004name\202\323\344\223\002,\022*/v2/{name=pro" - + "jects/*/instances/*/tables/*}\022\316\001\n\013Update" - + "Table\022,.google.bigtable.admin.v2.UpdateT" - + "ableRequest\032\035.google.longrunning.Operati" - + "on\"r\312A\034\n\005Table\022\023UpdateTableMetadata\332A\021ta" - + "ble,update_mask\202\323\344\223\002920/v2/{table.name=p" - + "rojects/*/instances/*/tables/*}:\005table\022\216" - + "\001\n\013DeleteTable\022,.google.bigtable.admin.v" - + "2.DeleteTableRequest\032\026.google.protobuf.E" - + "mpty\"9\332A\004name\202\323\344\223\002,**/v2/{name=projects/" - + "*/instances/*/tables/*}\022\306\001\n\rUndeleteTabl" - + "e\022..google.bigtable.admin.v2.UndeleteTab" - + "leRequest\032\035.google.longrunning.Operation" - + "\"f\312A\036\n\005Table\022\025UndeleteTableMetadata\332A\004na" - + "me\202\323\344\223\0028\"3/v2/{name=projects/*/instances" - + "/*/tables/*}:undelete:\001*\022\241\002\n\024CreateAutho" - + "rizedView\0225.google.bigtable.admin.v2.Cre" - + "ateAuthorizedViewRequest\032\035.google.longru" - + "nning.Operation\"\262\001\312A.\n\016AuthorizedView\022\034C" - + "reateAuthorizedViewMetadata\332A)parent,aut" - + "horized_view,authorized_view_id\202\323\344\223\002O\"\022\022*\022\022* + * Checks that reads using an app profile with `StandardIsolation` can + * see all writes committed before the token was created, even if the + * read and write target different clusters. + * + * + * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3; + * + * + * @return Whether the standardReadRemoteWrites field is set. 
+ */ + @java.lang.Override + public boolean hasStandardReadRemoteWrites() { + return modeCase_ == 3; + } + /** + * + * + *
+   * Checks that reads using an app profile with `StandardIsolation` can
+   * see all writes committed before the token was created, even if the
+   * read and write target different clusters.
+   * 
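// Example (illustrative sketch): explicitly requesting the standard
// consistency mode on a CheckConsistencyRequest. The table name and token are
// placeholders; the token would normally come from GenerateConsistencyToken.
String consistencyToken = "example-token"; // placeholder
com.google.bigtable.admin.v2.CheckConsistencyRequest standardCheck =
    com.google.bigtable.admin.v2.CheckConsistencyRequest.newBuilder()
        .setName("projects/my-project/instances/my-instance/tables/my-table")
        .setConsistencyToken(consistencyToken)
        .setStandardReadRemoteWrites(
            com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance())
        .build();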
+ * + * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3; + * + * + * @return The standardReadRemoteWrites. + */ + @java.lang.Override + public com.google.bigtable.admin.v2.StandardReadRemoteWrites getStandardReadRemoteWrites() { + if (modeCase_ == 3) { + return (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_; + } + return com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance(); + } + /** + * + * + *
+   * Checks that reads using an app profile with `StandardIsolation` can
+   * see all writes committed before the token was created, even if the
+   * read and write target different clusters.
+   * 
+ * + * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3; + * + */ + @java.lang.Override + public com.google.bigtable.admin.v2.StandardReadRemoteWritesOrBuilder + getStandardReadRemoteWritesOrBuilder() { + if (modeCase_ == 3) { + return (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_; + } + return com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance(); + } + + public static final int DATA_BOOST_READ_LOCAL_WRITES_FIELD_NUMBER = 4; + /** + * + * + *
+   * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+   * can see all writes committed before the token was created, but only if
+   * the read and write target the same cluster.
+   * 
+ * + * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4; + * + * + * @return Whether the dataBoostReadLocalWrites field is set. + */ + @java.lang.Override + public boolean hasDataBoostReadLocalWrites() { + return modeCase_ == 4; + } + /** + * + * + *
+   * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+   * can see all writes committed before the token was created, but only if
+   * the read and write target the same cluster.
+   * 
+ * + * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4; + * + * + * @return The dataBoostReadLocalWrites. + */ + @java.lang.Override + public com.google.bigtable.admin.v2.DataBoostReadLocalWrites getDataBoostReadLocalWrites() { + if (modeCase_ == 4) { + return (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_; + } + return com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance(); + } + /** + * + * + *
+   * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+   * can see all writes committed before the token was created, but only if
+   * the read and write target the same cluster.
+   * 
+ * + * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4; + * + */ + @java.lang.Override + public com.google.bigtable.admin.v2.DataBoostReadLocalWritesOrBuilder + getDataBoostReadLocalWritesOrBuilder() { + if (modeCase_ == 4) { + return (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_; + } + return com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -195,6 +366,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(consistencyToken_)) { com.google.protobuf.GeneratedMessageV3.writeString(output, 2, consistencyToken_); } + if (modeCase_ == 3) { + output.writeMessage(3, (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_); + } + if (modeCase_ == 4) { + output.writeMessage(4, (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_); + } getUnknownFields().writeTo(output); } @@ -210,6 +387,16 @@ public int getSerializedSize() { if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(consistencyToken_)) { size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, consistencyToken_); } + if (modeCase_ == 3) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 3, (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_); + } + if (modeCase_ == 4) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 4, (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -228,6 +415,19 @@ public boolean equals(final java.lang.Object obj) { if (!getName().equals(other.getName())) return false; if (!getConsistencyToken().equals(other.getConsistencyToken())) return false; + if (!getModeCase().equals(other.getModeCase())) return false; + switch (modeCase_) { + case 3: + if (!getStandardReadRemoteWrites().equals(other.getStandardReadRemoteWrites())) + return false; + break; + case 4: + if (!getDataBoostReadLocalWrites().equals(other.getDataBoostReadLocalWrites())) + return false; + break; + case 0: + default: + } if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -243,6 +443,18 @@ public int hashCode() { hash = (53 * hash) + getName().hashCode(); hash = (37 * hash) + CONSISTENCY_TOKEN_FIELD_NUMBER; hash = (53 * hash) + getConsistencyToken().hashCode(); + switch (modeCase_) { + case 3: + hash = (37 * hash) + STANDARD_READ_REMOTE_WRITES_FIELD_NUMBER; + hash = (53 * hash) + getStandardReadRemoteWrites().hashCode(); + break; + case 4: + hash = (37 * hash) + DATA_BOOST_READ_LOCAL_WRITES_FIELD_NUMBER; + hash = (53 * hash) + getDataBoostReadLocalWrites().hashCode(); + break; + case 0: + default: + } hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -385,6 +597,14 @@ public Builder clear() { bitField0_ = 0; name_ = ""; consistencyToken_ = ""; + if (standardReadRemoteWritesBuilder_ != null) { + standardReadRemoteWritesBuilder_.clear(); + } + if (dataBoostReadLocalWritesBuilder_ != null) { + dataBoostReadLocalWritesBuilder_.clear(); + } + modeCase_ = 0; + mode_ = null; return this; } @@ -415,6 +635,7 @@ public com.google.bigtable.admin.v2.CheckConsistencyRequest buildPartial() { if (bitField0_ != 0) { buildPartial0(result); } + buildPartialOneofs(result); onBuilt(); return result; } @@ -429,6 +650,17 @@ private void 
buildPartial0(com.google.bigtable.admin.v2.CheckConsistencyRequest } } + private void buildPartialOneofs(com.google.bigtable.admin.v2.CheckConsistencyRequest result) { + result.modeCase_ = modeCase_; + result.mode_ = this.mode_; + if (modeCase_ == 3 && standardReadRemoteWritesBuilder_ != null) { + result.mode_ = standardReadRemoteWritesBuilder_.build(); + } + if (modeCase_ == 4 && dataBoostReadLocalWritesBuilder_ != null) { + result.mode_ = dataBoostReadLocalWritesBuilder_.build(); + } + } + @java.lang.Override public Builder clone() { return super.clone(); @@ -485,6 +717,22 @@ public Builder mergeFrom(com.google.bigtable.admin.v2.CheckConsistencyRequest ot bitField0_ |= 0x00000002; onChanged(); } + switch (other.getModeCase()) { + case STANDARD_READ_REMOTE_WRITES: + { + mergeStandardReadRemoteWrites(other.getStandardReadRemoteWrites()); + break; + } + case DATA_BOOST_READ_LOCAL_WRITES: + { + mergeDataBoostReadLocalWrites(other.getDataBoostReadLocalWrites()); + break; + } + case MODE_NOT_SET: + { + break; + } + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -523,6 +771,20 @@ public Builder mergeFrom( bitField0_ |= 0x00000002; break; } // case 18 + case 26: + { + input.readMessage( + getStandardReadRemoteWritesFieldBuilder().getBuilder(), extensionRegistry); + modeCase_ = 3; + break; + } // case 26 + case 34: + { + input.readMessage( + getDataBoostReadLocalWritesFieldBuilder().getBuilder(), extensionRegistry); + modeCase_ = 4; + break; + } // case 34 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -540,6 +802,20 @@ public Builder mergeFrom( return this; } + private int modeCase_ = 0; + private java.lang.Object mode_; + + public ModeCase getModeCase() { + return ModeCase.forNumber(modeCase_); + } + + public Builder clearMode() { + modeCase_ = 0; + mode_ = null; + onChanged(); + return this; + } + private int bitField0_; private java.lang.Object name_ = ""; @@ -774,6 +1050,488 @@ public Builder setConsistencyTokenBytes(com.google.protobuf.ByteString value) { return this; } + private com.google.protobuf.SingleFieldBuilderV3< + com.google.bigtable.admin.v2.StandardReadRemoteWrites, + com.google.bigtable.admin.v2.StandardReadRemoteWrites.Builder, + com.google.bigtable.admin.v2.StandardReadRemoteWritesOrBuilder> + standardReadRemoteWritesBuilder_; + /** + * + * + *
+     * Checks that reads using an app profile with `StandardIsolation` can
+     * see all writes committed before the token was created, even if the
+     * read and write target different clusters.
+     * 
+ * + * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3; + * + * + * @return Whether the standardReadRemoteWrites field is set. + */ + @java.lang.Override + public boolean hasStandardReadRemoteWrites() { + return modeCase_ == 3; + } + /** + * + * + *
+     * Checks that reads using an app profile with `StandardIsolation` can
+     * see all writes committed before the token was created, even if the
+     * read and write target different clusters.
+     * 
+ * + * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3; + * + * + * @return The standardReadRemoteWrites. + */ + @java.lang.Override + public com.google.bigtable.admin.v2.StandardReadRemoteWrites getStandardReadRemoteWrites() { + if (standardReadRemoteWritesBuilder_ == null) { + if (modeCase_ == 3) { + return (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_; + } + return com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance(); + } else { + if (modeCase_ == 3) { + return standardReadRemoteWritesBuilder_.getMessage(); + } + return com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance(); + } + } + /** + * + * + *
+     * Checks that reads using an app profile with `StandardIsolation` can
+     * see all writes committed before the token was created, even if the
+     * read and write target different clusters.
+     * 
+ * + * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3; + * + */ + public Builder setStandardReadRemoteWrites( + com.google.bigtable.admin.v2.StandardReadRemoteWrites value) { + if (standardReadRemoteWritesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + mode_ = value; + onChanged(); + } else { + standardReadRemoteWritesBuilder_.setMessage(value); + } + modeCase_ = 3; + return this; + } + /** + * + * + *
+     * Checks that reads using an app profile with `StandardIsolation` can
+     * see all writes committed before the token was created, even if the
+     * read and write target different clusters.
+     * 
+ * + * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3; + * + */ + public Builder setStandardReadRemoteWrites( + com.google.bigtable.admin.v2.StandardReadRemoteWrites.Builder builderForValue) { + if (standardReadRemoteWritesBuilder_ == null) { + mode_ = builderForValue.build(); + onChanged(); + } else { + standardReadRemoteWritesBuilder_.setMessage(builderForValue.build()); + } + modeCase_ = 3; + return this; + } + /** + * + * + *
+     * Checks that reads using an app profile with `StandardIsolation` can
+     * see all writes committed before the token was created, even if the
+     * read and write target different clusters.
+     * 
+ * + * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3; + * + */ + public Builder mergeStandardReadRemoteWrites( + com.google.bigtable.admin.v2.StandardReadRemoteWrites value) { + if (standardReadRemoteWritesBuilder_ == null) { + if (modeCase_ == 3 + && mode_ + != com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance()) { + mode_ = + com.google.bigtable.admin.v2.StandardReadRemoteWrites.newBuilder( + (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_) + .mergeFrom(value) + .buildPartial(); + } else { + mode_ = value; + } + onChanged(); + } else { + if (modeCase_ == 3) { + standardReadRemoteWritesBuilder_.mergeFrom(value); + } else { + standardReadRemoteWritesBuilder_.setMessage(value); + } + } + modeCase_ = 3; + return this; + } + /** + * + * + *
+     * Checks that reads using an app profile with `StandardIsolation` can
+     * see all writes committed before the token was created, even if the
+     * read and write target different clusters.
+     * 
+ * + * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3; + * + */ + public Builder clearStandardReadRemoteWrites() { + if (standardReadRemoteWritesBuilder_ == null) { + if (modeCase_ == 3) { + modeCase_ = 0; + mode_ = null; + onChanged(); + } + } else { + if (modeCase_ == 3) { + modeCase_ = 0; + mode_ = null; + } + standardReadRemoteWritesBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Checks that reads using an app profile with `StandardIsolation` can
+     * see all writes committed before the token was created, even if the
+     * read and write target different clusters.
+     * 
+ * + * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3; + * + */ + public com.google.bigtable.admin.v2.StandardReadRemoteWrites.Builder + getStandardReadRemoteWritesBuilder() { + return getStandardReadRemoteWritesFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Checks that reads using an app profile with `StandardIsolation` can
+     * see all writes committed before the token was created, even if the
+     * read and write target different clusters.
+     * 
+ * + * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3; + * + */ + @java.lang.Override + public com.google.bigtable.admin.v2.StandardReadRemoteWritesOrBuilder + getStandardReadRemoteWritesOrBuilder() { + if ((modeCase_ == 3) && (standardReadRemoteWritesBuilder_ != null)) { + return standardReadRemoteWritesBuilder_.getMessageOrBuilder(); + } else { + if (modeCase_ == 3) { + return (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_; + } + return com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance(); + } + } + /** + * + * + *
+     * Checks that reads using an app profile with `StandardIsolation` can
+     * see all writes committed before the token was created, even if the
+     * read and write target different clusters.
+     * 
+ * + * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.bigtable.admin.v2.StandardReadRemoteWrites, + com.google.bigtable.admin.v2.StandardReadRemoteWrites.Builder, + com.google.bigtable.admin.v2.StandardReadRemoteWritesOrBuilder> + getStandardReadRemoteWritesFieldBuilder() { + if (standardReadRemoteWritesBuilder_ == null) { + if (!(modeCase_ == 3)) { + mode_ = com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance(); + } + standardReadRemoteWritesBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.bigtable.admin.v2.StandardReadRemoteWrites, + com.google.bigtable.admin.v2.StandardReadRemoteWrites.Builder, + com.google.bigtable.admin.v2.StandardReadRemoteWritesOrBuilder>( + (com.google.bigtable.admin.v2.StandardReadRemoteWrites) mode_, + getParentForChildren(), + isClean()); + mode_ = null; + } + modeCase_ = 3; + onChanged(); + return standardReadRemoteWritesBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.bigtable.admin.v2.DataBoostReadLocalWrites, + com.google.bigtable.admin.v2.DataBoostReadLocalWrites.Builder, + com.google.bigtable.admin.v2.DataBoostReadLocalWritesOrBuilder> + dataBoostReadLocalWritesBuilder_; + /** + * + * + *
+     * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+     * can see all writes committed before the token was created, but only if
+     * the read and write target the same cluster.
+     * 
+ * + * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4; + * + * + * @return Whether the dataBoostReadLocalWrites field is set. + */ + @java.lang.Override + public boolean hasDataBoostReadLocalWrites() { + return modeCase_ == 4; + } + /** + * + * + *
+     * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+     * can see all writes committed before the token was created, but only if
+     * the read and write target the same cluster.
+     * 
+ * + * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4; + * + * + * @return The dataBoostReadLocalWrites. + */ + @java.lang.Override + public com.google.bigtable.admin.v2.DataBoostReadLocalWrites getDataBoostReadLocalWrites() { + if (dataBoostReadLocalWritesBuilder_ == null) { + if (modeCase_ == 4) { + return (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_; + } + return com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance(); + } else { + if (modeCase_ == 4) { + return dataBoostReadLocalWritesBuilder_.getMessage(); + } + return com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance(); + } + } + /** + * + * + *
+     * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+     * can see all writes committed before the token was created, but only if
+     * the read and write target the same cluster.
+     * 
+ * + * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4; + * + */ + public Builder setDataBoostReadLocalWrites( + com.google.bigtable.admin.v2.DataBoostReadLocalWrites value) { + if (dataBoostReadLocalWritesBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + mode_ = value; + onChanged(); + } else { + dataBoostReadLocalWritesBuilder_.setMessage(value); + } + modeCase_ = 4; + return this; + } + /** + * + * + *
+     * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+     * can see all writes committed before the token was created, but only if
+     * the read and write target the same cluster.
+     * 
+ * + * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4; + * + */ + public Builder setDataBoostReadLocalWrites( + com.google.bigtable.admin.v2.DataBoostReadLocalWrites.Builder builderForValue) { + if (dataBoostReadLocalWritesBuilder_ == null) { + mode_ = builderForValue.build(); + onChanged(); + } else { + dataBoostReadLocalWritesBuilder_.setMessage(builderForValue.build()); + } + modeCase_ = 4; + return this; + } + /** + * + * + *
+     * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+     * can see all writes committed before the token was created, but only if
+     * the read and write target the same cluster.
+     * 
+ * + * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4; + * + */ + public Builder mergeDataBoostReadLocalWrites( + com.google.bigtable.admin.v2.DataBoostReadLocalWrites value) { + if (dataBoostReadLocalWritesBuilder_ == null) { + if (modeCase_ == 4 + && mode_ + != com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance()) { + mode_ = + com.google.bigtable.admin.v2.DataBoostReadLocalWrites.newBuilder( + (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_) + .mergeFrom(value) + .buildPartial(); + } else { + mode_ = value; + } + onChanged(); + } else { + if (modeCase_ == 4) { + dataBoostReadLocalWritesBuilder_.mergeFrom(value); + } else { + dataBoostReadLocalWritesBuilder_.setMessage(value); + } + } + modeCase_ = 4; + return this; + } + /** + * + * + *
+     * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+     * can see all writes committed before the token was created, but only if
+     * the read and write target the same cluster.
+     * 
+ * + * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4; + * + */ + public Builder clearDataBoostReadLocalWrites() { + if (dataBoostReadLocalWritesBuilder_ == null) { + if (modeCase_ == 4) { + modeCase_ = 0; + mode_ = null; + onChanged(); + } + } else { + if (modeCase_ == 4) { + modeCase_ = 0; + mode_ = null; + } + dataBoostReadLocalWritesBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+     * can see all writes committed before the token was created, but only if
+     * the read and write target the same cluster.
+     * 
+ * + * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4; + * + */ + public com.google.bigtable.admin.v2.DataBoostReadLocalWrites.Builder + getDataBoostReadLocalWritesBuilder() { + return getDataBoostReadLocalWritesFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+     * can see all writes committed before the token was created, but only if
+     * the read and write target the same cluster.
+     * 
+ * + * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4; + * + */ + @java.lang.Override + public com.google.bigtable.admin.v2.DataBoostReadLocalWritesOrBuilder + getDataBoostReadLocalWritesOrBuilder() { + if ((modeCase_ == 4) && (dataBoostReadLocalWritesBuilder_ != null)) { + return dataBoostReadLocalWritesBuilder_.getMessageOrBuilder(); + } else { + if (modeCase_ == 4) { + return (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_; + } + return com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance(); + } + } + /** + * + * + *
+     * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+     * can see all writes committed before the token was created, but only if
+     * the read and write target the same cluster.
+     * 
+ * + * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.bigtable.admin.v2.DataBoostReadLocalWrites, + com.google.bigtable.admin.v2.DataBoostReadLocalWrites.Builder, + com.google.bigtable.admin.v2.DataBoostReadLocalWritesOrBuilder> + getDataBoostReadLocalWritesFieldBuilder() { + if (dataBoostReadLocalWritesBuilder_ == null) { + if (!(modeCase_ == 4)) { + mode_ = com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance(); + } + dataBoostReadLocalWritesBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.bigtable.admin.v2.DataBoostReadLocalWrites, + com.google.bigtable.admin.v2.DataBoostReadLocalWrites.Builder, + com.google.bigtable.admin.v2.DataBoostReadLocalWritesOrBuilder>( + (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) mode_, + getParentForChildren(), + isClean()); + mode_ = null; + } + modeCase_ = 4; + onChanged(); + return dataBoostReadLocalWritesBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CheckConsistencyRequestOrBuilder.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CheckConsistencyRequestOrBuilder.java index 7f47bb6720..259e282606 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CheckConsistencyRequestOrBuilder.java +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/CheckConsistencyRequestOrBuilder.java @@ -81,4 +81,96 @@ public interface CheckConsistencyRequestOrBuilder * @return The bytes for consistencyToken. */ com.google.protobuf.ByteString getConsistencyTokenBytes(); + + /** + * + * + *
+   * Checks that reads using an app profile with `StandardIsolation` can
+   * see all writes committed before the token was created, even if the
+   * read and write target different clusters.
+   * 
+ * + * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3; + * + * + * @return Whether the standardReadRemoteWrites field is set. + */ + boolean hasStandardReadRemoteWrites(); + /** + * + * + *
+   * Checks that reads using an app profile with `StandardIsolation` can
+   * see all writes committed before the token was created, even if the
+   * read and write target different clusters.
+   * 
+ * + * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3; + * + * + * @return The standardReadRemoteWrites. + */ + com.google.bigtable.admin.v2.StandardReadRemoteWrites getStandardReadRemoteWrites(); + /** + * + * + *
+   * Checks that reads using an app profile with `StandardIsolation` can
+   * see all writes committed before the token was created, even if the
+   * read and write target different clusters.
+   * 
+ * + * .google.bigtable.admin.v2.StandardReadRemoteWrites standard_read_remote_writes = 3; + * + */ + com.google.bigtable.admin.v2.StandardReadRemoteWritesOrBuilder + getStandardReadRemoteWritesOrBuilder(); + + /** + * + * + *
+   * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+   * can see all writes committed before the token was created, but only if
+   * the read and write target the same cluster.
+   * 
+ * + * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4; + * + * + * @return Whether the dataBoostReadLocalWrites field is set. + */ + boolean hasDataBoostReadLocalWrites(); + /** + * + * + *
+   * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+   * can see all writes committed before the token was created, but only if
+   * the read and write target the same cluster.
+   * 
+ * + * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4; + * + * + * @return The dataBoostReadLocalWrites. + */ + com.google.bigtable.admin.v2.DataBoostReadLocalWrites getDataBoostReadLocalWrites(); + /** + * + * + *
+   * Checks that reads using an app profile with `DataBoostIsolationReadOnly`
+   * can see all writes committed before the token was created, but only if
+   * the read and write target the same cluster.
+   * 
+ * + * .google.bigtable.admin.v2.DataBoostReadLocalWrites data_boost_read_local_writes = 4; + * + */ + com.google.bigtable.admin.v2.DataBoostReadLocalWritesOrBuilder + getDataBoostReadLocalWritesOrBuilder(); + + com.google.bigtable.admin.v2.CheckConsistencyRequest.ModeCase getModeCase(); } diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/DataBoostReadLocalWrites.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/DataBoostReadLocalWrites.java new file mode 100644 index 0000000000..88f7fa88ae --- /dev/null +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/DataBoostReadLocalWrites.java @@ -0,0 +1,435 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/admin/v2/bigtable_table_admin.proto + +// Protobuf Java Version: 3.25.2 +package com.google.bigtable.admin.v2; + +/** + * + * + *
+ * Checks that all writes before the consistency token was generated in the same
+ * cluster are readable by Data Boost.
+ * 
+ * + * Protobuf type {@code google.bigtable.admin.v2.DataBoostReadLocalWrites} + */ +public final class DataBoostReadLocalWrites extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.bigtable.admin.v2.DataBoostReadLocalWrites) + DataBoostReadLocalWritesOrBuilder { + private static final long serialVersionUID = 0L; + // Use DataBoostReadLocalWrites.newBuilder() to construct. + private DataBoostReadLocalWrites(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private DataBoostReadLocalWrites() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new DataBoostReadLocalWrites(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.bigtable.admin.v2.BigtableTableAdminProto + .internal_static_google_bigtable_admin_v2_DataBoostReadLocalWrites_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.admin.v2.BigtableTableAdminProto + .internal_static_google_bigtable_admin_v2_DataBoostReadLocalWrites_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.bigtable.admin.v2.DataBoostReadLocalWrites.class, + com.google.bigtable.admin.v2.DataBoostReadLocalWrites.Builder.class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.bigtable.admin.v2.DataBoostReadLocalWrites)) { + return super.equals(obj); + } + com.google.bigtable.admin.v2.DataBoostReadLocalWrites other = + (com.google.bigtable.admin.v2.DataBoostReadLocalWrites) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.bigtable.admin.v2.DataBoostReadLocalWrites prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Checks that all writes before the consistency token was generated in the same
+   * cluster are readable by Data Boost.
+   * 
+ * + * Protobuf type {@code google.bigtable.admin.v2.DataBoostReadLocalWrites} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.admin.v2.DataBoostReadLocalWrites) + com.google.bigtable.admin.v2.DataBoostReadLocalWritesOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.bigtable.admin.v2.BigtableTableAdminProto + .internal_static_google_bigtable_admin_v2_DataBoostReadLocalWrites_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.admin.v2.BigtableTableAdminProto + .internal_static_google_bigtable_admin_v2_DataBoostReadLocalWrites_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.bigtable.admin.v2.DataBoostReadLocalWrites.class, + com.google.bigtable.admin.v2.DataBoostReadLocalWrites.Builder.class); + } + + // Construct using com.google.bigtable.admin.v2.DataBoostReadLocalWrites.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.bigtable.admin.v2.BigtableTableAdminProto + .internal_static_google_bigtable_admin_v2_DataBoostReadLocalWrites_descriptor; + } + + @java.lang.Override + public com.google.bigtable.admin.v2.DataBoostReadLocalWrites getDefaultInstanceForType() { + return com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance(); + } + + @java.lang.Override + public com.google.bigtable.admin.v2.DataBoostReadLocalWrites build() { + com.google.bigtable.admin.v2.DataBoostReadLocalWrites result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.bigtable.admin.v2.DataBoostReadLocalWrites buildPartial() { + com.google.bigtable.admin.v2.DataBoostReadLocalWrites result = + new com.google.bigtable.admin.v2.DataBoostReadLocalWrites(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.bigtable.admin.v2.DataBoostReadLocalWrites) { + return mergeFrom((com.google.bigtable.admin.v2.DataBoostReadLocalWrites) 
other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.bigtable.admin.v2.DataBoostReadLocalWrites other) { + if (other == com.google.bigtable.admin.v2.DataBoostReadLocalWrites.getDefaultInstance()) + return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.admin.v2.DataBoostReadLocalWrites) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.DataBoostReadLocalWrites) + private static final com.google.bigtable.admin.v2.DataBoostReadLocalWrites DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.bigtable.admin.v2.DataBoostReadLocalWrites(); + } + + public static com.google.bigtable.admin.v2.DataBoostReadLocalWrites getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public DataBoostReadLocalWrites parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.bigtable.admin.v2.DataBoostReadLocalWrites getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/DataBoostReadLocalWritesOrBuilder.java 
b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/DataBoostReadLocalWritesOrBuilder.java new file mode 100644 index 0000000000..be217512d3 --- /dev/null +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/DataBoostReadLocalWritesOrBuilder.java @@ -0,0 +1,25 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/admin/v2/bigtable_table_admin.proto + +// Protobuf Java Version: 3.25.2 +package com.google.bigtable.admin.v2; + +public interface DataBoostReadLocalWritesOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.admin.v2.DataBoostReadLocalWrites) + com.google.protobuf.MessageOrBuilder {} diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/InstanceProto.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/InstanceProto.java index c6e08673fa..29a37bf8f6 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/InstanceProto.java +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/InstanceProto.java @@ -76,6 +76,10 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_bigtable_admin_v2_AppProfile_StandardIsolation_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_bigtable_admin_v2_AppProfile_StandardIsolation_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_bigtable_admin_v2_HotTablet_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable @@ -137,7 +141,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\002\022\014\n\010RESIZING\020\003\022\014\n\010DISABLED\020\004:e\352Ab\n$bigt" + "ableadmin.googleapis.com/Cluster\022:projec" + "ts/{project}/instances/{instance}/cluste" - + "rs/{cluster}B\010\n\006config\"\350\006\n\nAppProfile\022\014\n" + + "rs/{cluster}B\010\n\006config\"\322\t\n\nAppProfile\022\014\n" + "\004name\030\001 \001(\t\022\014\n\004etag\030\002 \001(\t\022\023\n\013description" + "\030\003 \001(\t\022g\n\035multi_cluster_routing_use_any\030" + "\005 \001(\0132>.google.bigtable.admin.v2.AppProf" @@ -148,36 +152,46 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + ".admin.v2.AppProfile.PriorityB\002\030\001H\001\022T\n\022s" + "tandard_isolation\030\013 \001(\01326.google.bigtabl" + "e.admin.v2.AppProfile.StandardIsolationH" - + 
"\001\0320\n\031MultiClusterRoutingUseAny\022\023\n\013cluste" - + "r_ids\030\001 \003(\t\032N\n\024SingleClusterRouting\022\022\n\nc" - + "luster_id\030\001 \001(\t\022\"\n\032allow_transactional_w" - + "rites\030\002 \001(\010\032T\n\021StandardIsolation\022?\n\010prio" - + "rity\030\001 \001(\0162-.google.bigtable.admin.v2.Ap" - + "pProfile.Priority\"^\n\010Priority\022\030\n\024PRIORIT" - + "Y_UNSPECIFIED\020\000\022\020\n\014PRIORITY_LOW\020\001\022\023\n\017PRI" - + "ORITY_MEDIUM\020\002\022\021\n\rPRIORITY_HIGH\020\003:o\352Al\n\'" - + "bigtableadmin.googleapis.com/AppProfile\022" - + "Aprojects/{project}/instances/{instance}" - + "/appProfiles/{app_profile}B\020\n\016routing_po" - + "licyB\013\n\tisolation\"\210\003\n\tHotTablet\022\014\n\004name\030" - + "\001 \001(\t\022;\n\ntable_name\030\002 \001(\tB\'\372A$\n\"bigtable" - + "admin.googleapis.com/Table\0223\n\nstart_time" - + "\030\003 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022" - + "1\n\010end_time\030\004 \001(\0132\032.google.protobuf.Time" - + "stampB\003\340A\003\022\021\n\tstart_key\030\005 \001(\t\022\017\n\007end_key" - + "\030\006 \001(\t\022#\n\026node_cpu_usage_percent\030\007 \001(\002B\003" - + "\340A\003:\177\352A|\n&bigtableadmin.googleapis.com/H" - + "otTablet\022Rprojects/{project}/instances/{" - + "instance}/clusters/{cluster}/hotTablets/" - + "{hot_tablet}B\320\002\n\034com.google.bigtable.adm" - + "in.v2B\rInstanceProtoP\001Z=google.golang.or" - + "g/genproto/googleapis/bigtable/admin/v2;" - + "admin\252\002\036Google.Cloud.Bigtable.Admin.V2\312\002" - + "\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002\"Google" - + "::Cloud::Bigtable::Admin::V2\352Ax\n!cloudkm" - + "s.googleapis.com/CryptoKey\022Sprojects/{pr" - + "oject}/locations/{location}/keyRings/{ke" - + "y_ring}/cryptoKeys/{crypto_key}b\006proto3" + + "\001\022i\n\036data_boost_isolation_read_only\030\n \001(" + + "\0132?.google.bigtable.admin.v2.AppProfile." + + "DataBoostIsolationReadOnlyH\001\0320\n\031MultiClu" + + "sterRoutingUseAny\022\023\n\013cluster_ids\030\001 \003(\t\032N" + + "\n\024SingleClusterRouting\022\022\n\ncluster_id\030\001 \001" + + "(\t\022\"\n\032allow_transactional_writes\030\002 \001(\010\032T" + + "\n\021StandardIsolation\022?\n\010priority\030\001 \001(\0162-." 
+ + "google.bigtable.admin.v2.AppProfile.Prio" + + "rity\032\374\001\n\032DataBoostIsolationReadOnly\022w\n\025c" + + "ompute_billing_owner\030\001 \001(\0162S.google.bigt" + + "able.admin.v2.AppProfile.DataBoostIsolat" + + "ionReadOnly.ComputeBillingOwnerH\000\210\001\001\"K\n\023" + + "ComputeBillingOwner\022%\n!COMPUTE_BILLING_O" + + "WNER_UNSPECIFIED\020\000\022\r\n\tHOST_PAYS\020\001B\030\n\026_co" + + "mpute_billing_owner\"^\n\010Priority\022\030\n\024PRIOR" + + "ITY_UNSPECIFIED\020\000\022\020\n\014PRIORITY_LOW\020\001\022\023\n\017P" + + "RIORITY_MEDIUM\020\002\022\021\n\rPRIORITY_HIGH\020\003:o\352Al" + + "\n\'bigtableadmin.googleapis.com/AppProfil" + + "e\022Aprojects/{project}/instances/{instanc" + + "e}/appProfiles/{app_profile}B\020\n\016routing_" + + "policyB\013\n\tisolation\"\210\003\n\tHotTablet\022\014\n\004nam" + + "e\030\001 \001(\t\022;\n\ntable_name\030\002 \001(\tB\'\372A$\n\"bigtab" + + "leadmin.googleapis.com/Table\0223\n\nstart_ti" + + "me\030\003 \001(\0132\032.google.protobuf.TimestampB\003\340A" + + "\003\0221\n\010end_time\030\004 \001(\0132\032.google.protobuf.Ti" + + "mestampB\003\340A\003\022\021\n\tstart_key\030\005 \001(\t\022\017\n\007end_k" + + "ey\030\006 \001(\t\022#\n\026node_cpu_usage_percent\030\007 \001(\002" + + "B\003\340A\003:\177\352A|\n&bigtableadmin.googleapis.com" + + "/HotTablet\022Rprojects/{project}/instances" + + "/{instance}/clusters/{cluster}/hotTablet" + + "s/{hot_tablet}B\320\002\n\034com.google.bigtable.a" + + "dmin.v2B\rInstanceProtoP\001Z=google.golang." + + "org/genproto/googleapis/bigtable/admin/v" + + "2;admin\252\002\036Google.Cloud.Bigtable.Admin.V2" + + "\312\002\036Google\\Cloud\\Bigtable\\Admin\\V2\352\002\"Goog" + + "le::Cloud::Bigtable::Admin::V2\352Ax\n!cloud" + + "kms.googleapis.com/CryptoKey\022Sprojects/{" + + "project}/locations/{location}/keyRings/{" + + "key_ring}/cryptoKeys/{crypto_key}b\006proto" + + "3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -272,6 +286,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "SingleClusterRouting", "Priority", "StandardIsolation", + "DataBoostIsolationReadOnly", "RoutingPolicy", "Isolation", }); @@ -299,6 +314,14 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "Priority", }); + internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_descriptor = + internal_static_google_bigtable_admin_v2_AppProfile_descriptor.getNestedTypes().get(3); + internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_bigtable_admin_v2_AppProfile_DataBoostIsolationReadOnly_descriptor, + new java.lang.String[] { + "ComputeBillingOwner", + }); internal_static_google_bigtable_admin_v2_HotTablet_descriptor = getDescriptor().getMessageTypes().get(5); internal_static_google_bigtable_admin_v2_HotTablet_fieldAccessorTable = diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/StandardReadRemoteWrites.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/StandardReadRemoteWrites.java new file mode 100644 index 0000000000..6546f5e47c --- /dev/null +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/StandardReadRemoteWrites.java @@ -0,0 +1,435 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache 
License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/admin/v2/bigtable_table_admin.proto + +// Protobuf Java Version: 3.25.2 +package com.google.bigtable.admin.v2; + +/** + * + * + *
+ * Checks that all writes before the consistency token was generated are
+ * replicated in every cluster and readable.
+ * 
+ * + * Protobuf type {@code google.bigtable.admin.v2.StandardReadRemoteWrites} + */ +public final class StandardReadRemoteWrites extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.bigtable.admin.v2.StandardReadRemoteWrites) + StandardReadRemoteWritesOrBuilder { + private static final long serialVersionUID = 0L; + // Use StandardReadRemoteWrites.newBuilder() to construct. + private StandardReadRemoteWrites(com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private StandardReadRemoteWrites() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new StandardReadRemoteWrites(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.bigtable.admin.v2.BigtableTableAdminProto + .internal_static_google_bigtable_admin_v2_StandardReadRemoteWrites_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.admin.v2.BigtableTableAdminProto + .internal_static_google_bigtable_admin_v2_StandardReadRemoteWrites_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.bigtable.admin.v2.StandardReadRemoteWrites.class, + com.google.bigtable.admin.v2.StandardReadRemoteWrites.Builder.class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.bigtable.admin.v2.StandardReadRemoteWrites)) { + return super.equals(obj); + } + com.google.bigtable.admin.v2.StandardReadRemoteWrites other = + (com.google.bigtable.admin.v2.StandardReadRemoteWrites) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return 
PARSER.parseFrom(data); + } + + public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.bigtable.admin.v2.StandardReadRemoteWrites parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.bigtable.admin.v2.StandardReadRemoteWrites prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + /** + * + * + *
+   * Checks that all writes before the consistency token was generated are
+   * replicated in every cluster and readable.
+   * 
+ * + * Protobuf type {@code google.bigtable.admin.v2.StandardReadRemoteWrites} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.admin.v2.StandardReadRemoteWrites) + com.google.bigtable.admin.v2.StandardReadRemoteWritesOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.bigtable.admin.v2.BigtableTableAdminProto + .internal_static_google_bigtable_admin_v2_StandardReadRemoteWrites_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.admin.v2.BigtableTableAdminProto + .internal_static_google_bigtable_admin_v2_StandardReadRemoteWrites_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.bigtable.admin.v2.StandardReadRemoteWrites.class, + com.google.bigtable.admin.v2.StandardReadRemoteWrites.Builder.class); + } + + // Construct using com.google.bigtable.admin.v2.StandardReadRemoteWrites.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.bigtable.admin.v2.BigtableTableAdminProto + .internal_static_google_bigtable_admin_v2_StandardReadRemoteWrites_descriptor; + } + + @java.lang.Override + public com.google.bigtable.admin.v2.StandardReadRemoteWrites getDefaultInstanceForType() { + return com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance(); + } + + @java.lang.Override + public com.google.bigtable.admin.v2.StandardReadRemoteWrites build() { + com.google.bigtable.admin.v2.StandardReadRemoteWrites result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.bigtable.admin.v2.StandardReadRemoteWrites buildPartial() { + com.google.bigtable.admin.v2.StandardReadRemoteWrites result = + new com.google.bigtable.admin.v2.StandardReadRemoteWrites(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.bigtable.admin.v2.StandardReadRemoteWrites) { + return mergeFrom((com.google.bigtable.admin.v2.StandardReadRemoteWrites) 
other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.bigtable.admin.v2.StandardReadRemoteWrites other) { + if (other == com.google.bigtable.admin.v2.StandardReadRemoteWrites.getDefaultInstance()) + return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.admin.v2.StandardReadRemoteWrites) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.StandardReadRemoteWrites) + private static final com.google.bigtable.admin.v2.StandardReadRemoteWrites DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.bigtable.admin.v2.StandardReadRemoteWrites(); + } + + public static com.google.bigtable.admin.v2.StandardReadRemoteWrites getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public StandardReadRemoteWrites parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.bigtable.admin.v2.StandardReadRemoteWrites getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/StandardReadRemoteWritesOrBuilder.java 
b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/StandardReadRemoteWritesOrBuilder.java new file mode 100644 index 0000000000..aff7f459f3 --- /dev/null +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/StandardReadRemoteWritesOrBuilder.java @@ -0,0 +1,25 @@ +/* + * Copyright 2024 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/bigtable/admin/v2/bigtable_table_admin.proto + +// Protobuf Java Version: 3.25.2 +package com.google.bigtable.admin.v2; + +public interface StandardReadRemoteWritesOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.admin.v2.StandardReadRemoteWrites) + com.google.protobuf.MessageOrBuilder {} diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/bigtable_table_admin.proto b/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/bigtable_table_admin.proto index 9d5a38b073..9fe63a2738 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/bigtable_table_admin.proto +++ b/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/bigtable_table_admin.proto @@ -836,8 +836,30 @@ message CheckConsistencyRequest { // Required. The token created using GenerateConsistencyToken for the Table. string consistency_token = 2 [(google.api.field_behavior) = REQUIRED]; + + // Which type of read needs to consistently observe which type of write? + // Default: `standard_read_remote_writes` + oneof mode { + // Checks that reads using an app profile with `StandardIsolation` can + // see all writes committed before the token was created, even if the + // read and write target different clusters. + StandardReadRemoteWrites standard_read_remote_writes = 3; + + // Checks that reads using an app profile with `DataBoostIsolationReadOnly` + // can see all writes committed before the token was created, but only if + // the read and write target the same cluster. + DataBoostReadLocalWrites data_boost_read_local_writes = 4; + } } +// Checks that all writes before the consistency token was generated are +// replicated in every cluster and readable. +message StandardReadRemoteWrites {} + +// Checks that all writes before the consistency token was generated in the same +// cluster are readable by Databoost. 
+message DataBoostReadLocalWrites {} + // Response message for // [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] message CheckConsistencyResponse { diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/instance.proto b/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/instance.proto index 950d9f4880..fc7c2b7c93 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/instance.proto +++ b/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/instance.proto @@ -297,6 +297,35 @@ message AppProfile { Priority priority = 1; } + // Data Boost is a serverless compute capability that lets you run + // high-throughput read jobs on your Bigtable data, without impacting the + // performance of the clusters that handle your application traffic. + // Currently, Data Boost exclusively supports read-only use-cases with + // single-cluster routing. + // + // Data Boost reads are only guaranteed to see the results of writes that + // were written at least 30 minutes ago. This means newly written values may + // not become visible for up to 30m, and also means that old values may + // remain visible for up to 30m after being deleted or overwritten. To + // mitigate the staleness of the data, users may either wait 30m, or use + // CheckConsistency. + message DataBoostIsolationReadOnly { + // Compute Billing Owner specifies how usage should be accounted when using + // Data Boost. Compute Billing Owner also configures which Cloud Project is + // charged for relevant quota. + enum ComputeBillingOwner { + // Unspecified value. + COMPUTE_BILLING_OWNER_UNSPECIFIED = 0; + + // The host Cloud Project containing the targeted Bigtable Instance / + // Table pays for compute. + HOST_PAYS = 1; + } + + // The Compute Billing Owner for this Data Boost App Profile. + optional ComputeBillingOwner compute_billing_owner = 1; + } + // The unique name of the app profile. Values are of the form // `projects/{project}/instances/{instance}/appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`. string name = 1; @@ -335,6 +364,10 @@ message AppProfile { // The standard options used for isolating this app profile's traffic from // other use cases. StandardIsolation standard_isolation = 11; + + // Specifies that this app profile is intended for read-only usage via the + // Data Boost feature. 
+ DataBoostIsolationReadOnly data_boost_isolation_read_only = 10; } } diff --git a/proto-google-cloud-bigtable-v2/pom.xml b/proto-google-cloud-bigtable-v2/pom.xml index 0a30e75756..bd8e708c13 100644 --- a/proto-google-cloud-bigtable-v2/pom.xml +++ b/proto-google-cloud-bigtable-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigtable-v2 - 2.37.0 + 2.38.0 proto-google-cloud-bigtable-v2 PROTO library for proto-google-cloud-bigtable-v2 com.google.cloud google-cloud-bigtable-parent - 2.37.0 + 2.38.0 @@ -18,14 +18,14 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.37.0 + 2.38.0 pom import com.google.cloud google-cloud-bigtable-bom - 2.37.0 + 2.38.0 pom import diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlags.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlags.java index 8ec9e22fa2..aa9f34a669 100644 --- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlags.java +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlags.java @@ -184,6 +184,24 @@ public boolean getRetryInfo() { return retryInfo_; } + public static final int CLIENT_SIDE_METRICS_ENABLED_FIELD_NUMBER = 8; + private boolean clientSideMetricsEnabled_ = false; + /** + * + * + *
+   * Notify the server that the client has client side metrics enabled.
+   * 
+ * + * bool client_side_metrics_enabled = 8; + * + * @return The clientSideMetricsEnabled. + */ + @java.lang.Override + public boolean getClientSideMetricsEnabled() { + return clientSideMetricsEnabled_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -216,6 +234,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (retryInfo_ != false) { output.writeBool(7, retryInfo_); } + if (clientSideMetricsEnabled_ != false) { + output.writeBool(8, clientSideMetricsEnabled_); + } getUnknownFields().writeTo(output); } @@ -243,6 +264,9 @@ public int getSerializedSize() { if (retryInfo_ != false) { size += com.google.protobuf.CodedOutputStream.computeBoolSize(7, retryInfo_); } + if (clientSideMetricsEnabled_ != false) { + size += com.google.protobuf.CodedOutputStream.computeBoolSize(8, clientSideMetricsEnabled_); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -264,6 +288,7 @@ public boolean equals(final java.lang.Object obj) { if (getLastScannedRowResponses() != other.getLastScannedRowResponses()) return false; if (getRoutingCookie() != other.getRoutingCookie()) return false; if (getRetryInfo() != other.getRetryInfo()) return false; + if (getClientSideMetricsEnabled() != other.getClientSideMetricsEnabled()) return false; if (!getUnknownFields().equals(other.getUnknownFields())) return false; return true; } @@ -287,6 +312,8 @@ public int hashCode() { hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getRoutingCookie()); hash = (37 * hash) + RETRY_INFO_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getRetryInfo()); + hash = (37 * hash) + CLIENT_SIDE_METRICS_ENABLED_FIELD_NUMBER; + hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getClientSideMetricsEnabled()); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash; @@ -438,6 +465,7 @@ public Builder clear() { lastScannedRowResponses_ = false; routingCookie_ = false; retryInfo_ = false; + clientSideMetricsEnabled_ = false; return this; } @@ -491,6 +519,9 @@ private void buildPartial0(com.google.bigtable.v2.FeatureFlags result) { if (((from_bitField0_ & 0x00000020) != 0)) { result.retryInfo_ = retryInfo_; } + if (((from_bitField0_ & 0x00000040) != 0)) { + result.clientSideMetricsEnabled_ = clientSideMetricsEnabled_; + } } @java.lang.Override @@ -556,6 +587,9 @@ public Builder mergeFrom(com.google.bigtable.v2.FeatureFlags other) { if (other.getRetryInfo() != false) { setRetryInfo(other.getRetryInfo()); } + if (other.getClientSideMetricsEnabled() != false) { + setClientSideMetricsEnabled(other.getClientSideMetricsEnabled()); + } this.mergeUnknownFields(other.getUnknownFields()); onChanged(); return this; @@ -618,6 +652,12 @@ public Builder mergeFrom( bitField0_ |= 0x00000020; break; } // case 56 + case 64: + { + clientSideMetricsEnabled_ = input.readBool(); + bitField0_ |= 0x00000040; + break; + } // case 64 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -979,6 +1019,59 @@ public Builder clearRetryInfo() { return this; } + private boolean clientSideMetricsEnabled_; + /** + * + * + *
+     * Notify the server that the client has client side metrics enabled.
+     * 
+ * + * bool client_side_metrics_enabled = 8; + * + * @return The clientSideMetricsEnabled. + */ + @java.lang.Override + public boolean getClientSideMetricsEnabled() { + return clientSideMetricsEnabled_; + } + /** + * + * + *
+     * Notify the server that the client has client side metrics enabled.
+     * 
+ * + * bool client_side_metrics_enabled = 8; + * + * @param value The clientSideMetricsEnabled to set. + * @return This builder for chaining. + */ + public Builder setClientSideMetricsEnabled(boolean value) { + + clientSideMetricsEnabled_ = value; + bitField0_ |= 0x00000040; + onChanged(); + return this; + } + /** + * + * + *
+     * Notify the server that the client has client side metrics enabled.
+     * 
+ * + * bool client_side_metrics_enabled = 8; + * + * @return This builder for chaining. + */ + public Builder clearClientSideMetricsEnabled() { + bitField0_ = (bitField0_ & ~0x00000040); + clientSideMetricsEnabled_ = false; + onChanged(); + return this; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsOrBuilder.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsOrBuilder.java index 4e5e3c3f6a..f58d5c96d8 100644 --- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsOrBuilder.java +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsOrBuilder.java @@ -109,4 +109,17 @@ public interface FeatureFlagsOrBuilder * @return The retryInfo. */ boolean getRetryInfo(); + + /** + * + * + *
+   * Notify the server that the client has client side metrics enabled.
+   * 
+ * + * bool client_side_metrics_enabled = 8; + * + * @return The clientSideMetricsEnabled. + */ + boolean getClientSideMetricsEnabled(); } diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsProto.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsProto.java index 0bd27e36b5..30301d352a 100644 --- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsProto.java +++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/FeatureFlagsProto.java @@ -42,17 +42,18 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { static { java.lang.String[] descriptorData = { "\n&google/bigtable/v2/feature_flags.proto" - + "\022\022google.bigtable.v2\"\266\001\n\014FeatureFlags\022\025\n" + + "\022\022google.bigtable.v2\"\333\001\n\014FeatureFlags\022\025\n" + "\rreverse_scans\030\001 \001(\010\022\036\n\026mutate_rows_rate" + "_limit\030\003 \001(\010\022\037\n\027mutate_rows_rate_limit2\030" + "\005 \001(\010\022\"\n\032last_scanned_row_responses\030\004 \001(" + "\010\022\026\n\016routing_cookie\030\006 \001(\010\022\022\n\nretry_info\030" - + "\007 \001(\010B\275\001\n\026com.google.bigtable.v2B\021Featur" - + "eFlagsProtoP\001Z:google.golang.org/genprot" - + "o/googleapis/bigtable/v2;bigtable\252\002\030Goog" - + "le.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigt" - + "able\\V2\352\002\033Google::Cloud::Bigtable::V2b\006p" - + "roto3" + + "\007 \001(\010\022#\n\033client_side_metrics_enabled\030\010 \001" + + "(\010B\275\001\n\026com.google.bigtable.v2B\021FeatureFl" + + "agsProtoP\001Z:google.golang.org/genproto/g" + + "oogleapis/bigtable/v2;bigtable\252\002\030Google." + + "Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtabl" + + "e\\V2\352\002\033Google::Cloud::Bigtable::V2b\006prot" + + "o3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -69,6 +70,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "LastScannedRowResponses", "RoutingCookie", "RetryInfo", + "ClientSideMetricsEnabled", }); } diff --git a/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/feature_flags.proto b/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/feature_flags.proto index d3128c5c67..bfce3180fe 100644 --- a/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/feature_flags.proto +++ b/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/feature_flags.proto @@ -58,4 +58,7 @@ message FeatureFlags { // Notify the server that the client supports using retry info back off // durations to retry requests with. bool retry_info = 7; + + // Notify the server that the client has client side metrics enabled. 
+ bool client_side_metrics_enabled = 8; } diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml index 48f9dd3756..473ddf3d26 100644 --- a/samples/install-without-bom/pom.xml +++ b/samples/install-without-bom/pom.xml @@ -25,13 +25,15 @@ + com.google.cloud google-cloud-bigtable - 2.36.0 + 2.38.0 + junit diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml index 60c3dd1b29..07dede2ada 100644 --- a/samples/snapshot/pom.xml +++ b/samples/snapshot/pom.xml @@ -28,7 +28,7 @@ com.google.cloud google-cloud-bigtable - 2.37.0 + 2.38.0 diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml index 5a040c9b8c..807f1cecd9 100644 --- a/samples/snippets/pom.xml +++ b/samples/snippets/pom.xml @@ -23,14 +23,13 @@ UTF-8 - com.google.cloud libraries-bom - 26.25.0 + 26.37.0 pom import diff --git a/samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java b/samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java new file mode 100644 index 0000000000..8f3047442b --- /dev/null +++ b/samples/snippets/src/main/java/com/example/bigtable/AuthorizedViewExample.java @@ -0,0 +1,322 @@ +/* + * Copyright 2024 Google Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + *http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.example.bigtable; + +import static com.google.cloud.bigtable.data.v2.models.Filters.FILTERS; + +import com.google.api.gax.rpc.NotFoundException; +import com.google.api.gax.rpc.PermissionDeniedException; +import com.google.api.gax.rpc.ServerStream; +import com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient; +import com.google.cloud.bigtable.admin.v2.BigtableTableAdminSettings; +import com.google.cloud.bigtable.admin.v2.models.AuthorizedView; +import com.google.cloud.bigtable.admin.v2.models.CreateAuthorizedViewRequest; +import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest; +import com.google.cloud.bigtable.admin.v2.models.FamilySubsets; +import com.google.cloud.bigtable.admin.v2.models.SubsetView; +import com.google.cloud.bigtable.admin.v2.models.Table; +import com.google.cloud.bigtable.admin.v2.models.UpdateAuthorizedViewRequest; +import com.google.cloud.bigtable.data.v2.BigtableDataClient; +import com.google.cloud.bigtable.data.v2.BigtableDataSettings; +import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId; +import com.google.cloud.bigtable.data.v2.models.Filters.Filter; +import com.google.cloud.bigtable.data.v2.models.Query; +import com.google.cloud.bigtable.data.v2.models.Row; +import com.google.cloud.bigtable.data.v2.models.RowCell; +import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.protobuf.ByteString; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; + +public class AuthorizedViewExample { + + private static final String COLUMN_FAMILY = "cf"; + private static final String COLUMN_QUALIFIER_GREETING = "greeting"; + private static final String COLUMN_QUALIFIER_NAME = "name"; + private static final String 
ROW_KEY_PREFIX = "rowKey"; + private final String tableId; + private final String authorizedViewId; + private final BigtableTableAdminClient adminClient; + private final BigtableDataClient dataClient; + + public static void main(String[] args) throws IOException { + + if (args.length != 2) { + System.out.println("Missing required project id or instance id"); + return; + } + String projectId = args[0]; + String instanceId = args[1]; + + AuthorizedViewExample authorizedViewExample = + new AuthorizedViewExample(projectId, instanceId, "test-table", "test-authorized-view"); + authorizedViewExample.run(); + } + + public AuthorizedViewExample( + String projectId, String instanceId, String tableId, String authorizedViewId) + throws IOException { + this.tableId = tableId; + this.authorizedViewId = authorizedViewId; + + // Creates the settings to configure a bigtable data client. + BigtableDataSettings settings = + BigtableDataSettings.newBuilder().setProjectId(projectId).setInstanceId(instanceId).build(); + + // Creates a bigtable data client. + dataClient = BigtableDataClient.create(settings); + + // Creates the settings to configure a bigtable table admin client. + BigtableTableAdminSettings adminSettings = + BigtableTableAdminSettings.newBuilder() + .setProjectId(projectId) + .setInstanceId(instanceId) + .build(); + + // Creates a bigtable table admin client. + adminClient = BigtableTableAdminClient.create(adminSettings); + } + + public void close() { + dataClient.close(); + adminClient.close(); + } + + public void run() { + createTable(); + createAuthorizedView(); + updateAuthorizedView(); + getAuthorizedView(); + listAllAuthorizedViews(); + writeToAuthorizedView(); + readSingleRowFromAuthorizedView(); + readRowsWithFilterFromAuthorizedView(); + deleteAuthorizedView(); + deleteTable(); + close(); + } + + public void createTable() { + // Checks if table exists, creates table if it does not exist. + if (!adminClient.exists(tableId)) { + System.out.println("Table does not exist, creating table: " + tableId); + CreateTableRequest createTableRequest = + CreateTableRequest.of(tableId).addFamily(COLUMN_FAMILY); + Table table = adminClient.createTable(createTableRequest); + System.out.printf("Table: %s created successfully%n", table.getId()); + } + } + + public void deleteTable() { + // Deletes the entire table. + System.out.println("\nDelete table: " + tableId); + try { + adminClient.deleteTable(tableId); + System.out.printf("Table: %s deleted successfully%n", tableId); + } catch (NotFoundException e) { + System.err.println("Failed to delete a non-existent table: " + e.getMessage()); + } + } + + /** + * Demonstrates how to create an authorized view under a table with the specified configuration. + */ + public void createAuthorizedView() { + // Checks if the authorized view exists, creates it if it does not exist. 
+ try { + adminClient.getAuthorizedView(tableId, authorizedViewId); + } catch (NotFoundException exception) { + System.out.printf("%nCreating authorized view %s in table %s%n", authorizedViewId, tableId); + // [START bigtable_create_authorized_view] + try { + CreateAuthorizedViewRequest request = + CreateAuthorizedViewRequest.of(tableId, authorizedViewId) + .setAuthorizedViewType( + SubsetView.create() + .addRowPrefix("") + .setFamilySubsets( + COLUMN_FAMILY, + FamilySubsets.create().addQualifierPrefix(COLUMN_QUALIFIER_NAME))); + AuthorizedView authorizedView = adminClient.createAuthorizedView(request); + System.out.printf("AuthorizedView: %s created successfully%n", authorizedView.getId()); + } catch (NotFoundException e) { + System.err.println( + "Failed to create an authorized view from a non-existent table: " + e.getMessage()); + } + // [END bigtable_create_authorized_view] + } + } + + /** Demonstrates how to modify an authorized view. */ + public void updateAuthorizedView() { + System.out.printf("%nUpdating authorized view %s in table %s%n", authorizedViewId, tableId); + // [START bigtable_update_authorized_view] + try { + // Update to an authorized view permitting everything. + UpdateAuthorizedViewRequest request = + UpdateAuthorizedViewRequest.of(tableId, authorizedViewId) + .setAuthorizedViewType( + SubsetView.create() + .addRowPrefix("") + .setFamilySubsets( + COLUMN_FAMILY, FamilySubsets.create().addQualifierPrefix(""))); + AuthorizedView authorizedView = adminClient.updateAuthorizedView(request); + System.out.printf("AuthorizedView: %s updated successfully%n", authorizedView.getId()); + } catch (NotFoundException e) { + System.err.println("Failed to modify a non-existent authorized view: " + e.getMessage()); + } + // [END bigtable_update_authorized_view] + } + + /** Demonstrates how to get an authorized view's metadata. */ + public AuthorizedView getAuthorizedView() { + System.out.printf("%nGetting authorized view %s in table %s%n", authorizedViewId, tableId); + // [START bigtable_get_authorized_view] + AuthorizedView authorizedView = null; + try { + authorizedView = adminClient.getAuthorizedView(tableId, authorizedViewId); + SubsetView subsetView = (SubsetView) authorizedView.getAuthorizedViewType(); + + for (ByteString rowPrefix : subsetView.getRowPrefixes()) { + System.out.printf("Row Prefix: %s%n", rowPrefix.toStringUtf8()); + } + for (Map.Entry entry : subsetView.getFamilySubsets().entrySet()) { + for (ByteString qualifierPrefix : entry.getValue().getQualifierPrefixes()) { + System.out.printf( + "Column Family: %s, Qualifier Prefix: %s%n", + entry.getKey(), qualifierPrefix.toStringUtf8()); + } + for (ByteString qualifier : entry.getValue().getQualifiers()) { + System.out.printf( + "Column Family: %s, Qualifier: %s%n", entry.getKey(), qualifier.toStringUtf8()); + } + } + } catch (NotFoundException e) { + System.err.println( + "Failed to retrieve metadata from a non-existent authorized view: " + e.getMessage()); + } + // [END bigtable_get_authorized_view] + return authorizedView; + } + + /** Demonstrates how to list all authorized views within a table. 
*/ + public List listAllAuthorizedViews() { + System.out.printf("%nListing authorized views in table %s%n", tableId); + // [START bigtable_list_authorized_views] + List authorizedViewIds = new ArrayList<>(); + try { + authorizedViewIds = adminClient.listAuthorizedViews(tableId); + for (String authorizedViewId : authorizedViewIds) { + System.out.println(authorizedViewId); + } + } catch (NotFoundException e) { + System.err.println( + "Failed to list authorized views from a non-existent table: " + e.getMessage()); + } + // [END bigtable_list_authorized_views] + return authorizedViewIds; + } + + /** Demonstrates how to delete an authorized view. */ + public void deleteAuthorizedView() { + System.out.printf("%nDeleting authorized view %s in table %s%n", authorizedViewId, tableId); + // [START bigtable_delete_authorized_view] + try { + adminClient.deleteAuthorizedView(tableId, authorizedViewId); + System.out.printf("AuthorizedView: %s deleted successfully%n", authorizedViewId); + } catch (NotFoundException e) { + System.err.println("Failed to delete a non-existent authorized view: " + e.getMessage()); + } + // [END bigtable_delete_authorized_view] + } + + /** Demonstrates how to write some rows to an authorized view. */ + public void writeToAuthorizedView() { + // [START bigtable_authorized_view_write_rows] + try { + System.out.println("\nWriting to authorized view"); + String[] names = {"World", "Bigtable", "Java"}; + for (int i = 0; i < names.length; i++) { + String greeting = "Hello " + names[i] + "!"; + RowMutation rowMutation = + RowMutation.create(AuthorizedViewId.of(tableId, authorizedViewId), ROW_KEY_PREFIX + i) + .setCell(COLUMN_FAMILY, COLUMN_QUALIFIER_NAME, names[i]) + .setCell(COLUMN_FAMILY, COLUMN_QUALIFIER_GREETING, greeting); + dataClient.mutateRow(rowMutation); + System.out.println(greeting); + } + } catch (Exception e) { + if (e instanceof NotFoundException) { + System.err.println("Failed to write to non-existent authorized view: " + e.getMessage()); + } else if (e instanceof PermissionDeniedException) { + System.err.println( + "Failed to apply mutations outside of the authorized view: " + e.getMessage()); + } + } + // [END bigtable_authorized_view_write_rows] + } + + /** Demonstrates how to read a single row from an authorized view. */ + public Row readSingleRowFromAuthorizedView() { + // [START bigtable_authorized_view_get_by_key] + try { + System.out.println("\nReading a single row by row key from an authorized view"); + Row row = + dataClient.readRow(AuthorizedViewId.of(tableId, authorizedViewId), ROW_KEY_PREFIX + 0); + System.out.println("Row: " + row.getKey().toStringUtf8()); + for (RowCell cell : row.getCells()) { + System.out.printf( + "Family: %s Qualifier: %s Value: %s%n", + cell.getFamily(), cell.getQualifier().toStringUtf8(), cell.getValue().toStringUtf8()); + } + return row; + } catch (NotFoundException e) { + System.err.println("Failed to read from a non-existent authorized view: " + e.getMessage()); + return null; + } + // [END bigtable_authorized_view_get_by_key] + } + + /** Demonstrates how to read rows from an authorized view with a filter. 
*/ + public List readRowsWithFilterFromAuthorizedView() { + // [START bigtable_authorized_view_scan_with_filter] + try { + // A filter that matches only the most recent cell within each column + Filter filter = FILTERS.limit().cellsPerColumn(1); + System.out.println("\nScanning authorized view with filter"); + Query query = Query.create(AuthorizedViewId.of(tableId, authorizedViewId)).filter(filter); + ServerStream rowStream = dataClient.readRows(query); + List authorizedViewRows = new ArrayList<>(); + for (Row r : rowStream) { + System.out.println("Row Key: " + r.getKey().toStringUtf8()); + authorizedViewRows.add(r); + for (RowCell cell : r.getCells()) { + System.out.printf( + "Family: %s Qualifier: %s Value: %s%n", + cell.getFamily(), cell.getQualifier().toStringUtf8(), cell.getValue().toStringUtf8()); + } + } + return authorizedViewRows; + } catch (NotFoundException e) { + System.err.println("Failed to read a non-existent authorized view: " + e.getMessage()); + return null; + } + // [END bigtable_authorized_view_scan_with_filter] + } +} diff --git a/samples/snippets/src/main/java/com/example/bigtable/Filters.java b/samples/snippets/src/main/java/com/example/bigtable/Filters.java index 54f3282573..c27437da58 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/Filters.java +++ b/samples/snippets/src/main/java/com/example/bigtable/Filters.java @@ -26,6 +26,7 @@ import com.google.cloud.bigtable.data.v2.models.Query; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowCell; +import com.google.cloud.bigtable.data.v2.models.TableId; import java.io.IOException; import java.time.Instant; import java.time.temporal.ChronoUnit; @@ -360,7 +361,7 @@ private static void readFilter( // once, and can be reused for multiple requests. After completing all of your requests, call // the "close" method on the client to safely clean up any remaining background resources. 
try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) { - Query query = Query.create(tableId).filter(filter); + Query query = Query.create(TableId.of(tableId)).filter(filter); ServerStream rows = dataClient.readRows(query); for (Row row : rows) { printRow(row); diff --git a/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java b/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java index 724985ce22..99bc25735d 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java +++ b/samples/snippets/src/main/java/com/example/bigtable/HelloWorld.java @@ -32,6 +32,7 @@ import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowCell; import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.models.TableId; import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; @@ -138,7 +139,7 @@ public void writeToTable() { for (int i = 0; i < names.length; i++) { String greeting = "Hello " + names[i] + "!"; RowMutation rowMutation = - RowMutation.create(tableId, ROW_KEY_PREFIX + i) + RowMutation.create(TableId.of(tableId), ROW_KEY_PREFIX + i) .setCell(COLUMN_FAMILY, COLUMN_QUALIFIER_NAME, names[i]) .setCell(COLUMN_FAMILY, COLUMN_QUALIFIER_GREETING, greeting); dataClient.mutateRow(rowMutation); @@ -155,7 +156,7 @@ public Row readSingleRow() { // [START bigtable_hw_get_by_key] try { System.out.println("\nReading a single row by row key"); - Row row = dataClient.readRow(tableId, ROW_KEY_PREFIX + 0); + Row row = dataClient.readRow(TableId.of(tableId), ROW_KEY_PREFIX + 0); System.out.println("Row: " + row.getKey().toStringUtf8()); for (RowCell cell : row.getCells()) { System.out.printf( @@ -175,7 +176,7 @@ public List readSpecificCells() { // [START bigtable_hw_get_by_key] try { System.out.println("\nReading specific cells by family and qualifier"); - Row row = dataClient.readRow(tableId, ROW_KEY_PREFIX + 0); + Row row = dataClient.readRow(TableId.of(tableId), ROW_KEY_PREFIX + 0); System.out.println("Row: " + row.getKey().toStringUtf8()); List cells = row.getCells(COLUMN_FAMILY, COLUMN_QUALIFIER_NAME); for (RowCell cell : cells) { @@ -196,7 +197,7 @@ public List readTable() { // [START bigtable_hw_scan_all] try { System.out.println("\nReading the entire table"); - Query query = Query.create(tableId); + Query query = Query.create(TableId.of(tableId)); ServerStream rowStream = dataClient.readRows(query); List tableRows = new ArrayList<>(); for (Row r : rowStream) { @@ -229,7 +230,7 @@ public void filterLimitCellsPerCol(String tableId) { private void readRowFilter(String tableId, Filter filter) { String rowKey = Base64.getEncoder().encodeToString("greeting0".getBytes(StandardCharsets.UTF_8)); - Row row = dataClient.readRow(tableId, rowKey, filter); + Row row = dataClient.readRow(TableId.of(tableId), rowKey, filter); printRow(row); System.out.println("Row filter completed."); } @@ -237,7 +238,7 @@ private void readRowFilter(String tableId, Filter filter) { // [START bigtable_hw_scan_with_filter] private void readFilter(String tableId, Filter filter) { - Query query = Query.create(tableId).filter(filter); + Query query = Query.create(TableId.of(tableId)).filter(filter); ServerStream rows = dataClient.readRows(query); for (Row row : rows) { printRow(row); diff --git a/samples/snippets/src/main/java/com/example/bigtable/InstanceAdminExample.java 
b/samples/snippets/src/main/java/com/example/bigtable/InstanceAdminExample.java index 0bdae948d2..df813ace39 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/InstanceAdminExample.java +++ b/samples/snippets/src/main/java/com/example/bigtable/InstanceAdminExample.java @@ -87,6 +87,11 @@ public void run() { addCluster(); deleteCluster(); deleteInstance(); + close(); + } + + // Close the client + void close() { adminClient.close(); } diff --git a/samples/snippets/src/main/java/com/example/bigtable/KeySalting.java b/samples/snippets/src/main/java/com/example/bigtable/KeySalting.java index da5e401347..436ab139a4 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/KeySalting.java +++ b/samples/snippets/src/main/java/com/example/bigtable/KeySalting.java @@ -21,6 +21,7 @@ import com.google.cloud.bigtable.data.v2.models.Query; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.models.TableId; import java.io.IOException; import java.util.ArrayList; import java.util.List; @@ -35,7 +36,7 @@ public static void writeSaltedRow( BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId); String saltedRowKey = getSaltedRowKey(rowKey, SALT_RANGE); RowMutation rowMutation = - RowMutation.create(tableId, saltedRowKey) + RowMutation.create(TableId.of(tableId), saltedRowKey) .setCell(COLUMN_FAMILY_NAME, "os_build", "PQ2A.190405.003"); dataClient.mutateRow(rowMutation); @@ -47,7 +48,7 @@ public static void writeSaltedRow( public static void readSaltedRow( String projectId, String instanceId, String tableId, String rowKey) throws IOException { BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId); - Row row = dataClient.readRow(tableId, getSaltedRowKey(rowKey, SALT_RANGE)); + Row row = dataClient.readRow(TableId.of(tableId), getSaltedRowKey(rowKey, SALT_RANGE)); System.out.printf("Successfully read row %s\n", row.getKey().toStringUtf8()); } @@ -58,7 +59,7 @@ public static void scanSaltedRows( List queries = new ArrayList<>(); for (int i = 0; i < SALT_RANGE; i++) { - queries.add(Query.create(tableId).prefix(i + "-" + prefix)); + queries.add(Query.create(TableId.of(tableId)).prefix(i + "-" + prefix)); } List>> futures = new ArrayList<>(); diff --git a/samples/snippets/src/main/java/com/example/bigtable/Quickstart.java b/samples/snippets/src/main/java/com/example/bigtable/Quickstart.java index 8654d31edd..4c0a415c6d 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/Quickstart.java +++ b/samples/snippets/src/main/java/com/example/bigtable/Quickstart.java @@ -23,6 +23,7 @@ import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowCell; +import com.google.cloud.bigtable.data.v2.models.TableId; public class Quickstart { @@ -43,7 +44,7 @@ public static void quickstart(String projectId, String instanceId, String tableI // the "close" method on the client to safely clean up any remaining background resources. 
try (BigtableDataClient dataClient = BigtableDataClient.create(settings)) { System.out.println("\nReading a single row by row key"); - Row row = dataClient.readRow(tableId, "r1"); + Row row = dataClient.readRow(TableId.of(tableId), "r1"); System.out.println("Row: " + row.getKey().toStringUtf8()); for (RowCell cell : row.getCells()) { System.out.printf( diff --git a/samples/snippets/src/main/java/com/example/bigtable/Reads.java b/samples/snippets/src/main/java/com/example/bigtable/Reads.java index 90bc847bd5..a5a7923bb3 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/Reads.java +++ b/samples/snippets/src/main/java/com/example/bigtable/Reads.java @@ -26,6 +26,7 @@ import com.google.cloud.bigtable.data.v2.models.Query; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowCell; +import com.google.cloud.bigtable.data.v2.models.TableId; import java.io.IOException; public class Reads { @@ -48,7 +49,7 @@ public static void readRow(String projectId, String instanceId, String tableId) try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) { String rowkey = "phone#4c410523#20190501"; - Row row = dataClient.readRow(tableId, rowkey); + Row row = dataClient.readRow(TableId.of(tableId), rowkey); printRow(row); } catch (IOException e) { @@ -79,7 +80,7 @@ public static void readRowPartial(String projectId, String instanceId, String ta .filter(FILTERS.family().exactMatch("stats_summary")) .filter(FILTERS.qualifier().exactMatch("os_build")); - Row row = dataClient.readRow(tableId, rowkey, filter); + Row row = dataClient.readRow(TableId.of(tableId), rowkey, filter); printRow(row); } catch (IOException e) { @@ -104,7 +105,9 @@ public static void readRows(String projectId, String instanceId, String tableId) // the "close" method on the client to safely clean up any remaining background resources. try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) { Query query = - Query.create(tableId).rowKey("phone#4c410523#20190501").rowKey("phone#4c410523#20190502"); + Query.create(TableId.of(tableId)) + .rowKey("phone#4c410523#20190501") + .rowKey("phone#4c410523#20190502"); ServerStream rows = dataClient.readRows(query); for (Row row : rows) { printRow(row); @@ -133,7 +136,7 @@ public static void readRowRange(String projectId, String instanceId, String tabl // once, and can be reused for multiple requests. After completing all of your requests, call // the "close" method on the client to safely clean up any remaining background resources. try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) { - Query query = Query.create(tableId).range(start, end); + Query query = Query.create(TableId.of(tableId)).range(start, end); ServerStream rows = dataClient.readRows(query); for (Row row : rows) { printRow(row); @@ -160,7 +163,7 @@ public static void readRowRanges(String projectId, String instanceId, String tab // the "close" method on the client to safely clean up any remaining background resources. 
try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) { Query query = - Query.create(tableId) + Query.create(TableId.of(tableId)) .range("phone#4c410523#20190501", "phone#4c410523#20190601") .range("phone#5c10102#20190501", "phone#5c10102#20190601"); ServerStream rows = dataClient.readRows(query); @@ -188,7 +191,7 @@ public static void readPrefix(String projectId, String instanceId, String tableI // once, and can be reused for multiple requests. After completing all of your requests, call // the "close" method on the client to safely clean up any remaining background resources. try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) { - Query query = Query.create(tableId).prefix("phone"); + Query query = Query.create(TableId.of(tableId)).prefix("phone"); ServerStream rows = dataClient.readRows(query); for (Row row : rows) { printRow(row); @@ -215,7 +218,7 @@ public static void readRowsReversed(String projectId, String instanceId, String // the "close" method on the client to safely clean up any remaining background resources. try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) { Query query = - Query.create(tableId) + Query.create(TableId.of(tableId)) .reversed(true) .limit(3) .prefix("phone#4c410523") @@ -247,7 +250,7 @@ public static void readFilter(String projectId, String instanceId, String tableI // once, and can be reused for multiple requests. After completing all of your requests, call // the "close" method on the client to safely clean up any remaining background resources. try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) { - Query query = Query.create(tableId).filter(filter); + Query query = Query.create(TableId.of(tableId)).filter(filter); ServerStream rows = dataClient.readRows(query); for (Row row : rows) { printRow(row); diff --git a/samples/snippets/src/main/java/com/example/bigtable/TableAdminExample.java b/samples/snippets/src/main/java/com/example/bigtable/TableAdminExample.java index 9842658a82..5f804153a1 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/TableAdminExample.java +++ b/samples/snippets/src/main/java/com/example/bigtable/TableAdminExample.java @@ -108,6 +108,11 @@ public void run() { printModifiedColumnFamily(); deleteColumnFamily(); deleteTable(); + close(); + } + + // Close the client + void close() { adminClient.close(); } diff --git a/samples/snippets/src/main/java/com/example/bigtable/WriteBatch.java b/samples/snippets/src/main/java/com/example/bigtable/WriteBatch.java index 28a8b201e9..24e1000a2b 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/WriteBatch.java +++ b/samples/snippets/src/main/java/com/example/bigtable/WriteBatch.java @@ -23,6 +23,7 @@ import com.google.api.gax.batching.BatchingException; import com.google.cloud.bigtable.data.v2.BigtableDataClient; import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; +import com.google.cloud.bigtable.data.v2.models.TableId; import com.google.protobuf.ByteString; import java.util.ArrayList; import java.util.List; @@ -38,7 +39,8 @@ public static void writeBatch(String projectId, String instanceId, String tableI try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) { List> batchFutures = new ArrayList<>(); - try (Batcher batcher = dataClient.newBulkMutationBatcher(tableId)) { + try (Batcher batcher = + dataClient.newBulkMutationBatcher(TableId.of(tableId))) { long timestamp = 
System.currentTimeMillis() * 1000; batchFutures.add( batcher.add( diff --git a/samples/snippets/src/main/java/com/example/bigtable/WriteConditionally.java b/samples/snippets/src/main/java/com/example/bigtable/WriteConditionally.java index ac01cb0c63..82d5fbfaba 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/WriteConditionally.java +++ b/samples/snippets/src/main/java/com/example/bigtable/WriteConditionally.java @@ -24,6 +24,7 @@ import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation; import com.google.cloud.bigtable.data.v2.models.Filters.Filter; import com.google.cloud.bigtable.data.v2.models.Mutation; +import com.google.cloud.bigtable.data.v2.models.TableId; public class WriteConditionally { private static final String COLUMN_FAMILY_NAME = "stats_summary"; @@ -49,7 +50,9 @@ public static void writeConditionally(String projectId, String instanceId, Strin .filter(FILTERS.value().regex("PQ2A\\..*")); ConditionalRowMutation conditionalRowMutation = - ConditionalRowMutation.create(tableId, rowkey).condition(filter).then(mutation); + ConditionalRowMutation.create(TableId.of(tableId), rowkey) + .condition(filter) + .then(mutation); boolean success = dataClient.checkAndMutateRow(conditionalRowMutation); diff --git a/samples/snippets/src/main/java/com/example/bigtable/WriteIncrement.java b/samples/snippets/src/main/java/com/example/bigtable/WriteIncrement.java index 0f91a13717..4f832d5a98 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/WriteIncrement.java +++ b/samples/snippets/src/main/java/com/example/bigtable/WriteIncrement.java @@ -21,6 +21,7 @@ import com.google.cloud.bigtable.data.v2.BigtableDataClient; import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow; import com.google.cloud.bigtable.data.v2.models.Row; +import com.google.cloud.bigtable.data.v2.models.TableId; import java.nio.charset.Charset; public class WriteIncrement { @@ -36,7 +37,7 @@ public static void writeIncrement(String projectId, String instanceId, String ta // if it is encoded as a 64-bit big-endian signed integer. 
String rowkey = "phone#4c410523#20190501"; ReadModifyWriteRow mutation = - ReadModifyWriteRow.create(tableId, rowkey) + ReadModifyWriteRow.create(TableId.of(tableId), rowkey) .increment(COLUMN_FAMILY_NAME, "connected_cell", -1); Row success = dataClient.readModifyWriteRow(mutation); diff --git a/samples/snippets/src/main/java/com/example/bigtable/WriteSimple.java b/samples/snippets/src/main/java/com/example/bigtable/WriteSimple.java index 5d7f4d5b2a..f176703432 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/WriteSimple.java +++ b/samples/snippets/src/main/java/com/example/bigtable/WriteSimple.java @@ -20,6 +20,7 @@ import com.google.cloud.bigtable.data.v2.BigtableDataClient; import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.models.TableId; import com.google.protobuf.ByteString; public class WriteSimple { @@ -36,7 +37,7 @@ public static void writeSimple(String projectId, String instanceId, String table String rowkey = "phone#4c410523#20190501"; RowMutation rowMutation = - RowMutation.create(tableId, rowkey) + RowMutation.create(TableId.of(tableId), rowkey) .setCell( COLUMN_FAMILY_NAME, ByteString.copyFrom("connected_cell".getBytes()), diff --git a/samples/snippets/src/main/java/com/example/bigtable/deletes/BatchDeleteExample.java b/samples/snippets/src/main/java/com/example/bigtable/deletes/BatchDeleteExample.java index 2b814c4e42..590a618f0b 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/deletes/BatchDeleteExample.java +++ b/samples/snippets/src/main/java/com/example/bigtable/deletes/BatchDeleteExample.java @@ -23,14 +23,16 @@ import com.google.cloud.bigtable.data.v2.models.Query; import com.google.cloud.bigtable.data.v2.models.Row; import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; +import com.google.cloud.bigtable.data.v2.models.TableId; import java.io.IOException; public class BatchDeleteExample { public void batchDelete(String projectId, String instanceId, String tableId) throws InterruptedException, IOException { try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) { - try (Batcher batcher = dataClient.newBulkMutationBatcher(tableId)) { - ServerStream rows = dataClient.readRows(Query.create(tableId)); + try (Batcher batcher = + dataClient.newBulkMutationBatcher(TableId.of(tableId))) { + ServerStream rows = dataClient.readRows(Query.create(TableId.of(tableId))); for (Row row : rows) { batcher.add( RowMutationEntry.create(row.getKey()).deleteCells("cell_plan", "data_plan_05gb")); diff --git a/samples/snippets/src/main/java/com/example/bigtable/deletes/ConditionalDeleteExample.java b/samples/snippets/src/main/java/com/example/bigtable/deletes/ConditionalDeleteExample.java index 4fac36404c..1da173321f 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/deletes/ConditionalDeleteExample.java +++ b/samples/snippets/src/main/java/com/example/bigtable/deletes/ConditionalDeleteExample.java @@ -21,6 +21,7 @@ import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation; import com.google.cloud.bigtable.data.v2.models.Filters; import com.google.cloud.bigtable.data.v2.models.Mutation; +import com.google.cloud.bigtable.data.v2.models.TableId; import java.io.IOException; public class ConditionalDeleteExample { @@ -30,7 +31,7 @@ public void conditionalDelete(String projectId, String instanceId, String tableI Filters.Filter condition = Filters.FILTERS.value().exactMatch("PQ2A.190405.004"); Mutation mutation = 
Mutation.create().deleteCells("stats_summary", "os_build"); dataClient.checkAndMutateRow( - ConditionalRowMutation.create(tableId, "phone#4c410523#20190502") + ConditionalRowMutation.create(TableId.of(tableId), "phone#4c410523#20190502") .condition(condition) .then(mutation)); } diff --git a/samples/snippets/src/main/java/com/example/bigtable/deletes/DeleteFromColumnExample.java b/samples/snippets/src/main/java/com/example/bigtable/deletes/DeleteFromColumnExample.java index 7403272d19..0c68168902 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/deletes/DeleteFromColumnExample.java +++ b/samples/snippets/src/main/java/com/example/bigtable/deletes/DeleteFromColumnExample.java @@ -20,6 +20,7 @@ import com.google.cloud.bigtable.data.v2.BigtableDataClient; import com.google.cloud.bigtable.data.v2.models.Mutation; import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.models.TableId; import java.io.IOException; public class DeleteFromColumnExample { @@ -27,7 +28,8 @@ public void deleteFromColumnCells(String projectId, String instanceId, String ta throws IOException { try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) { Mutation mutation = Mutation.create().deleteCells("cell_plan", "data_plan_01gb"); - dataClient.mutateRow(RowMutation.create(tableId, "phone#4c410523#20190501", mutation)); + dataClient.mutateRow( + RowMutation.create(TableId.of(tableId), "phone#4c410523#20190501", mutation)); } } } diff --git a/samples/snippets/src/main/java/com/example/bigtable/deletes/DeleteFromColumnFamilyExample.java b/samples/snippets/src/main/java/com/example/bigtable/deletes/DeleteFromColumnFamilyExample.java index db89c04e4b..64016a6ef0 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/deletes/DeleteFromColumnFamilyExample.java +++ b/samples/snippets/src/main/java/com/example/bigtable/deletes/DeleteFromColumnFamilyExample.java @@ -19,6 +19,7 @@ // [START bigtable_delete_from_column_family] import com.google.cloud.bigtable.data.v2.BigtableDataClient; import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.models.TableId; import java.io.IOException; public class DeleteFromColumnFamilyExample { @@ -26,7 +27,8 @@ public void deleteFromColumnFamily(String projectId, String instanceId, String t throws IOException { try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) { dataClient.mutateRow( - RowMutation.create(tableId, "phone#5c10102#20190501").deleteFamily("stats_summary")); + RowMutation.create(TableId.of(tableId), "phone#5c10102#20190501") + .deleteFamily("stats_summary")); } } } diff --git a/samples/snippets/src/main/java/com/example/bigtable/deletes/DeleteFromRowExample.java b/samples/snippets/src/main/java/com/example/bigtable/deletes/DeleteFromRowExample.java index 029298497f..f1d283722f 100644 --- a/samples/snippets/src/main/java/com/example/bigtable/deletes/DeleteFromRowExample.java +++ b/samples/snippets/src/main/java/com/example/bigtable/deletes/DeleteFromRowExample.java @@ -20,6 +20,7 @@ import com.google.cloud.bigtable.data.v2.BigtableDataClient; import com.google.cloud.bigtable.data.v2.models.Mutation; import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.models.TableId; import java.io.IOException; public class DeleteFromRowExample { @@ -27,7 +28,8 @@ public void deleteFromRow(String projectId, String instanceId, String tableId) throws IOException { try 
     try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) {
       Mutation mutation = Mutation.create().deleteRow();
-      dataClient.mutateRow(RowMutation.create(tableId, "phone#4c410523#20190501", mutation));
+      dataClient.mutateRow(
+          RowMutation.create(TableId.of(tableId), "phone#4c410523#20190501", mutation));
     }
   }
 }
diff --git a/samples/snippets/src/test/java/com/example/bigtable/AuthorizedViewExampleTest.java b/samples/snippets/src/test/java/com/example/bigtable/AuthorizedViewExampleTest.java
new file mode 100644
index 0000000000..5990d66107
--- /dev/null
+++ b/samples/snippets/src/test/java/com/example/bigtable/AuthorizedViewExampleTest.java
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2024 Google Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.example.bigtable;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertThrows;
+
+import com.google.api.gax.rpc.NotFoundException;
+import com.google.cloud.bigtable.admin.v2.BigtableTableAdminClient;
+import com.google.cloud.bigtable.admin.v2.BigtableTableAdminSettings;
+import com.google.cloud.bigtable.admin.v2.models.AuthorizedView;
+import com.google.cloud.bigtable.admin.v2.models.CreateAuthorizedViewRequest;
+import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest;
+import com.google.cloud.bigtable.admin.v2.models.FamilySubsets;
+import com.google.cloud.bigtable.admin.v2.models.SubsetView;
+import com.google.cloud.bigtable.data.v2.BigtableDataClient;
+import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
+import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId;
+import com.google.cloud.bigtable.data.v2.models.Row;
+import com.google.cloud.bigtable.data.v2.models.RowCell;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+import org.junit.After;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class AuthorizedViewExampleTest extends BigtableBaseTest {
+
+  private static final String TABLE_PREFIX = "table";
+  private static final String AUTHORIZED_VIEW_PREFIX = "authorized-view";
+  private static final String COLUMN_FAMILY = "cf";
+  private String tableId;
+  private String authorizedViewId;
+  private static BigtableDataClient dataClient;
+  private static BigtableTableAdminClient adminClient;
+  private AuthorizedViewExample authorizedViewExample;
+
+  @BeforeClass
+  public static void beforeClass() throws IOException {
+    initializeVariables();
+    BigtableDataSettings settings =
+        BigtableDataSettings.newBuilder().setProjectId(projectId).setInstanceId(instanceId).build();
+    dataClient = BigtableDataClient.create(settings);
+    BigtableTableAdminSettings adminSettings =
+        BigtableTableAdminSettings.newBuilder()
+            .setProjectId(projectId)
+            .setInstanceId(instanceId)
+            .build();
+    adminClient = BigtableTableAdminClient.create(adminSettings);
+  }
+
+  @AfterClass
+  public static void afterClass() {
+    garbageCollect();
+    dataClient.close();
+    adminClient.close();
+  }
+
+  @Before
+  public void setup() throws IOException {
+    tableId = generateResourceId(TABLE_PREFIX);
+    authorizedViewId = generateResourceId(AUTHORIZED_VIEW_PREFIX);
+    authorizedViewExample =
+        new AuthorizedViewExample(projectId, instanceId, tableId, authorizedViewId);
+    adminClient.createTable(CreateTableRequest.of(tableId).addFamily(COLUMN_FAMILY));
+    adminClient.createAuthorizedView(
+        CreateAuthorizedViewRequest.of(tableId, authorizedViewId)
+            .setAuthorizedViewType(
+                SubsetView.create()
+                    .addRowPrefix("")
+                    .setFamilySubsets(
+                        COLUMN_FAMILY, FamilySubsets.create().addQualifierPrefix(""))));
+  }
+
+  @After
+  public void after() {
+    if (adminClient.exists(tableId)) {
+      // Deleting a table also deletes all the authorized views inside it.
+      adminClient.deleteTable(tableId);
+    }
+    authorizedViewExample.close();
+  }
+
+  @Test
+  public void testRunDoesNotFail() {
+    authorizedViewExample.run();
+  }
+
+  @Test
+  public void testAuthorizedViewCreateUpdateDelete() throws IOException {
+    // Creates an authorized view.
+    String testAuthorizedViewId = generateResourceId(AUTHORIZED_VIEW_PREFIX);
+    AuthorizedViewExample testAuthorizedViewExample =
+        new AuthorizedViewExample(projectId, instanceId, tableId, testAuthorizedViewId);
+    testAuthorizedViewExample.createAuthorizedView();
+    AuthorizedView authorizedView = adminClient.getAuthorizedView(tableId, testAuthorizedViewId);
+    assertEquals(authorizedView.getId(), testAuthorizedViewId);
+
+    // Updates the authorized view.
+    testAuthorizedViewExample.updateAuthorizedView();
+    AuthorizedView updatedAuthorizedView =
+        adminClient.getAuthorizedView(tableId, testAuthorizedViewId);
+    assertNotEquals(authorizedView, updatedAuthorizedView);
+
+    // Deletes the authorized view.
+    testAuthorizedViewExample.deleteAuthorizedView();
+    assertThrows(
+        NotFoundException.class,
+        () -> adminClient.getAuthorizedView(tableId, testAuthorizedViewId));
+
+    testAuthorizedViewExample.close();
+  }
+
+  @Test
+  public void testGetAuthorizedView() {
+    AuthorizedView authorizedView = authorizedViewExample.getAuthorizedView();
+    assertNotNull(authorizedView);
+    assertEquals(authorizedView.getId(), authorizedViewId);
+  }
+
+  @Test
+  public void testListAuthorizedView() {
+    List<String> authorizedViewIds = authorizedViewExample.listAllAuthorizedViews();
+    assertEquals(authorizedViewIds.size(), 1);
+    assertEquals(authorizedViewIds.get(0), authorizedViewId);
+  }
+
+  @Test
+  public void testWriteToAuthorizedView() {
+    assertNull(dataClient.readRow(AuthorizedViewId.of(tableId, authorizedViewId), "rowKey0"));
+    authorizedViewExample.writeToAuthorizedView();
+    assertNotNull(dataClient.readRow(AuthorizedViewId.of(tableId, authorizedViewId), "rowKey0"));
+  }
+
+  @Test
+  public void testReadsFromAuthorizedView() {
+    authorizedViewExample.writeToAuthorizedView();
+
+    Row actualRow = authorizedViewExample.readSingleRowFromAuthorizedView();
+    assertEquals("rowKey0", actualRow.getKey().toStringUtf8());
+    assertEquals(2, actualRow.getCells().size());
+    assertEquals("Hello World!", actualRow.getCells().get(0).getValue().toStringUtf8());
+    assertEquals("World", actualRow.getCells().get(1).getValue().toStringUtf8());
+
+    List<Row> rows = authorizedViewExample.readRowsWithFilterFromAuthorizedView();
+    List<String> printedRows = new ArrayList<>();
+    for (Row row : rows) {
+      for (RowCell cell : row.getCells()) {
+        printedRows.add(
+            String.format(
+                "%s_%s_%s:%s",
+                row.getKey().toStringUtf8(),
+                cell.getFamily(),
+                cell.getQualifier().toStringUtf8(),
+                cell.getValue().toStringUtf8()));
+      }
+    }
+    String[] expectedRows =
+        new String[] {
+          "rowKey0_cf_greeting:Hello World!",
+          "rowKey0_cf_name:World",
+          "rowKey1_cf_greeting:Hello Bigtable!",
+          "rowKey1_cf_name:Bigtable",
+          "rowKey2_cf_greeting:Hello Java!",
+          "rowKey2_cf_name:Java"
+        };
+    assertEquals(printedRows, Arrays.asList(expectedRows));
+  }
+
+  private static void garbageCollect() {
+    Pattern timestampPattern = Pattern.compile(TABLE_PREFIX + "-([0-9a-f]+)-([0-9a-f]+)");
+    for (String tableId : adminClient.listTables()) {
+      Matcher matcher = timestampPattern.matcher(tableId);
+      if (!matcher.matches()) {
+        continue;
+      }
+      String timestampStr = matcher.group(1);
+      long timestamp = Long.parseLong(timestampStr, 16);
+      if (System.currentTimeMillis() - timestamp < TimeUnit.MINUTES.toMillis(10)) {
+        continue;
+      }
+      System.out.println("\nGarbage collecting orphaned table: " + tableId);
+      adminClient.deleteTable(tableId);
+    }
+  }
+}
diff --git a/samples/snippets/src/test/java/com/example/bigtable/BigtableBaseTest.java b/samples/snippets/src/test/java/com/example/bigtable/BigtableBaseTest.java
index 8845c587ba..5a4475e898 100644
--- a/samples/snippets/src/test/java/com/example/bigtable/BigtableBaseTest.java
+++ b/samples/snippets/src/test/java/com/example/bigtable/BigtableBaseTest.java
@@ -55,7 +55,7 @@ public void tearDown() {
     bout.reset();
   }
 
-  public static String generateTableId(String prefix) {
+  public static String generateResourceId(String prefix) {
     return prefix + "-" + UUID.randomUUID().toString().substring(0, 20);
   }
 
diff --git a/samples/snippets/src/test/java/com/example/bigtable/HelloWorldTest.java b/samples/snippets/src/test/java/com/example/bigtable/HelloWorldTest.java
index f0fca85d79..ead4d6c3fd 100644
--- a/samples/snippets/src/test/java/com/example/bigtable/HelloWorldTest.java
+++ b/samples/snippets/src/test/java/com/example/bigtable/HelloWorldTest.java
@@ -26,6 +26,7 @@
 import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest;
 import com.google.cloud.bigtable.data.v2.BigtableDataClient;
 import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
+import com.google.cloud.bigtable.data.v2.models.TableId;
 import java.io.IOException;
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
@@ -99,9 +100,9 @@ public void testCreateAndDeleteTable() throws IOException {
   @Test
   public void testWriteToTable() {
     // Writes to a table.
-    assertNull(dataClient.readRow(tableId, "rowKey0"));
+    assertNull(dataClient.readRow(TableId.of(tableId), "rowKey0"));
     helloWorld.writeToTable();
-    assertNotNull(dataClient.readRow(tableId, "rowKey0"));
+    assertNotNull(dataClient.readRow(TableId.of(tableId), "rowKey0"));
   }
 
   @Test
diff --git a/samples/snippets/src/test/java/com/example/bigtable/InstanceAdminExampleTest.java b/samples/snippets/src/test/java/com/example/bigtable/InstanceAdminExampleTest.java
index dc66b2f9a2..15df1f8fa5 100644
--- a/samples/snippets/src/test/java/com/example/bigtable/InstanceAdminExampleTest.java
+++ b/samples/snippets/src/test/java/com/example/bigtable/InstanceAdminExampleTest.java
@@ -78,6 +78,9 @@ public void after() {
     if (adminClient.exists(instanceId)) {
       adminClient.deleteInstance(instanceId);
     }
+    if (instanceAdmin != null) {
+      instanceAdmin.close();
+    }
   }
 
   @Test
diff --git a/samples/snippets/src/test/java/com/example/bigtable/MobileTimeSeriesBaseTest.java b/samples/snippets/src/test/java/com/example/bigtable/MobileTimeSeriesBaseTest.java
index 00aec60498..98182187a3 100644
--- a/samples/snippets/src/test/java/com/example/bigtable/MobileTimeSeriesBaseTest.java
+++ b/samples/snippets/src/test/java/com/example/bigtable/MobileTimeSeriesBaseTest.java
@@ -21,6 +21,7 @@
 import com.google.cloud.bigtable.data.v2.BigtableDataClient;
 import com.google.cloud.bigtable.data.v2.models.BulkMutation;
 import com.google.cloud.bigtable.data.v2.models.Mutation;
+import com.google.cloud.bigtable.data.v2.models.TableId;
 import com.google.protobuf.ByteString;
 import java.io.IOException;
 import java.time.Instant;
@@ -28,7 +29,7 @@
 
 public class MobileTimeSeriesBaseTest extends BigtableBaseTest {
 
-  public static final String TABLE_ID = generateTableId("mobile-time-series");
+  public static final String TABLE_ID = generateResourceId("mobile-time-series");
   public static final String COLUMN_FAMILY_NAME_STATS = "stats_summary";
   public static final String COLUMN_FAMILY_NAME_PLAN = "cell_plan";
   public static final Instant CURRENT_TIME = Instant.now();
@@ -53,7 +54,7 @@ public static void createTable() throws IOException {
   public static void writeStatsData() throws IOException {
     try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) {
       BulkMutation bulkMutation =
-          BulkMutation.create(TABLE_ID)
+          BulkMutation.create(TableId.of(TABLE_ID))
               .add(
                   "phone#4c410523#20190501",
                   Mutation.create()
@@ -135,7 +136,7 @@ public static void writeStatsData() throws IOException {
   public static void writePlanData() throws IOException {
     try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) {
       BulkMutation bulkMutation =
-          BulkMutation.create(TABLE_ID)
+          BulkMutation.create(TableId.of(TABLE_ID))
               .add(
                   "phone#4c410523#20190501",
                   Mutation.create()
diff --git a/samples/snippets/src/test/java/com/example/bigtable/QuickstartTest.java b/samples/snippets/src/test/java/com/example/bigtable/QuickstartTest.java
index ba00ba872a..b8271de50e 100644
--- a/samples/snippets/src/test/java/com/example/bigtable/QuickstartTest.java
+++ b/samples/snippets/src/test/java/com/example/bigtable/QuickstartTest.java
@@ -23,6 +23,7 @@
 import com.google.cloud.bigtable.data.v2.BigtableDataClient;
 import com.google.cloud.bigtable.data.v2.models.Row;
 import com.google.cloud.bigtable.data.v2.models.RowMutation;
+import com.google.cloud.bigtable.data.v2.models.TableId;
 import java.io.IOException;
 import org.hamcrest.CoreMatchers;
 import org.junit.BeforeClass;
@@ -46,10 +47,11 @@ public static void beforeClass() throws IOException {
     }
     try (BigtableDataClient dataClient = BigtableDataClient.create(projectId, instanceId)) {
       String rowKey = "r1";
-      Row row = dataClient.readRow(TABLE_ID, rowKey);
+      Row row = dataClient.readRow(TableId.of(TABLE_ID), rowKey);
       if (row == null) {
         dataClient.mutateRow(
-            RowMutation.create(TABLE_ID, rowKey).setCell(columnFamily, "c1", "quickstart"));
+            RowMutation.create(TableId.of(TABLE_ID), rowKey)
+                .setCell(columnFamily, "c1", "quickstart"));
       }
     }
   }
diff --git a/samples/snippets/src/test/java/com/example/bigtable/TableAdminExampleTest.java b/samples/snippets/src/test/java/com/example/bigtable/TableAdminExampleTest.java
index 391764b549..d4fd4de304 100644
--- a/samples/snippets/src/test/java/com/example/bigtable/TableAdminExampleTest.java
+++ b/samples/snippets/src/test/java/com/example/bigtable/TableAdminExampleTest.java
@@ -67,7 +67,7 @@ public static void afterClass() {
 
   @Before
   public void setup() throws IOException {
-    tableId = generateTableId(TABLE_PREFIX);
+    tableId = generateResourceId(TABLE_PREFIX);
     tableAdmin = new TableAdminExample(projectId, instanceId, tableId);
     adminClient.createTable(CreateTableRequest.of(tableId).addFamily("cf"));
   }
@@ -77,12 +77,15 @@ public void after() {
     if (adminClient.exists(tableId)) {
       adminClient.deleteTable(tableId);
     }
+    if (tableAdmin != null) {
+      tableAdmin.close();
+    }
   }
 
   @Test
   public void testCreateAndDeleteTable() throws IOException {
     // Creates a table.
-    String testTable = generateTableId(TABLE_PREFIX);
+    String testTable = generateResourceId(TABLE_PREFIX);
     TableAdminExample testTableAdmin = new TableAdminExample(projectId, instanceId, testTable);
     testTableAdmin.createTable();
     assertTrue(adminClient.exists(testTable));
diff --git a/samples/snippets/src/test/java/com/example/bigtable/deletes/DeletesTest.java b/samples/snippets/src/test/java/com/example/bigtable/deletes/DeletesTest.java
index bff3c74a1f..a2fa31c0d6 100644
--- a/samples/snippets/src/test/java/com/example/bigtable/deletes/DeletesTest.java
+++ b/samples/snippets/src/test/java/com/example/bigtable/deletes/DeletesTest.java
@@ -23,6 +23,7 @@
 import com.google.cloud.bigtable.data.v2.models.Query;
 import com.google.cloud.bigtable.data.v2.models.Row;
 import com.google.cloud.bigtable.data.v2.models.RowCell;
+import com.google.cloud.bigtable.data.v2.models.TableId;
 import com.google.common.truth.Truth;
 import java.io.IOException;
 import java.util.List;
@@ -57,7 +58,7 @@ public static void afterClass() throws IOException {
   @Test
   public void test1_testDeleteFromColumn() throws IOException {
     String rowKey = "phone#4c410523#20190501";
-    Row row = bigtableDataClient.readRow(TABLE_ID, rowKey);
+    Row row = bigtableDataClient.readRow(TableId.of(TABLE_ID), rowKey);
 
     String qualifier = "data_plan_01gb";
     List<RowCell> cells = row.getCells(COLUMN_FAMILY_NAME_PLAN, qualifier);
@@ -65,7 +66,7 @@ public void test1_testDeleteFromColumn() throws IOException {
 
     DeleteFromColumnExample deleteFromColumnExample = new DeleteFromColumnExample();
     deleteFromColumnExample.deleteFromColumnCells(projectId, instanceId, TABLE_ID);
-    row = bigtableDataClient.readRow(TABLE_ID, rowKey);
+    row = bigtableDataClient.readRow(TableId.of(TABLE_ID), rowKey);
 
     List<RowCell> cellsAfterDelete = row.getCells(COLUMN_FAMILY_NAME_PLAN, qualifier);
     Truth.assertThat(cellsAfterDelete).isEmpty();
@@ -74,13 +75,13 @@
   @Test
   public void test2_testDeleteFromRow() throws IOException {
     String rowKey = "phone#4c410523#20190501";
-    Row row = bigtableDataClient.readRow(TABLE_ID, rowKey);
+    Row row = bigtableDataClient.readRow(TableId.of(TABLE_ID), rowKey);
     Truth.assertThat(row).isNotNull();
 
     DeleteFromRowExample deleteFromRowExample = new DeleteFromRowExample();
     deleteFromRowExample.deleteFromRow(projectId, instanceId, TABLE_ID);
-    row = bigtableDataClient.readRow(TABLE_ID, rowKey);
+    row = bigtableDataClient.readRow(TableId.of(TABLE_ID), rowKey);
     Truth.assertThat(row).isNull();
   }
 
@@ -88,7 +89,7 @@ public void test2_testDeleteFromRow() throws IOException {
   @Test
   public void test3_testStreamingAndBatching() throws IOException, InterruptedException {
     String rowKey = "phone#4c410523#20190502";
-    Row row = bigtableDataClient.readRow(TABLE_ID, rowKey);
+    Row row = bigtableDataClient.readRow(TableId.of(TABLE_ID), rowKey);
 
     String qualifier = "data_plan_05gb";
     List<RowCell> cells = row.getCells(COLUMN_FAMILY_NAME_PLAN, qualifier);
@@ -96,7 +97,7 @@ public void test3_testStreamingAndBatching() throws IOException, InterruptedExce
 
     BatchDeleteExample batchDeleteExample = new BatchDeleteExample();
     batchDeleteExample.batchDelete(projectId, instanceId, TABLE_ID);
-    row = bigtableDataClient.readRow(TABLE_ID, rowKey);
+    row = bigtableDataClient.readRow(TableId.of(TABLE_ID), rowKey);
 
     List<RowCell> cellsAfterDelete = row.getCells(COLUMN_FAMILY_NAME_PLAN, qualifier);
     Truth.assertThat(cellsAfterDelete).isEmpty();
@@ -105,7 +106,7 @@
   @Test
   public void test4_testCheckAndMutate() throws IOException {
     String rowKey = "phone#4c410523#20190502";
-    Row row = bigtableDataClient.readRow(TABLE_ID, rowKey);
+    Row row = bigtableDataClient.readRow(TableId.of(TABLE_ID), rowKey);
 
     String qualifier = "os_build";
     List<RowCell> cells = row.getCells(COLUMN_FAMILY_NAME_STATS, qualifier);
@@ -113,7 +114,7 @@ public void test4_testCheckAndMutate() throws IOException {
 
     ConditionalDeleteExample conditionalDeleteExample = new ConditionalDeleteExample();
     conditionalDeleteExample.conditionalDelete(projectId, instanceId, TABLE_ID);
-    row = bigtableDataClient.readRow(TABLE_ID, rowKey);
+    row = bigtableDataClient.readRow(TableId.of(TABLE_ID), rowKey);
 
     List<RowCell> cellsAfterDelete = row.getCells(COLUMN_FAMILY_NAME_STATS, qualifier);
     Truth.assertThat(cellsAfterDelete).isEmpty();
@@ -122,7 +123,7 @@ public void test5_testDropRowRange() throws IOException {
     String rowPrefix = "phone#4c410523";
-    Query query = Query.create(TABLE_ID).prefix(rowPrefix);
+    Query query = Query.create(TableId.of(TABLE_ID)).prefix(rowPrefix);
     ServerStream<Row> rows = bigtableDataClient.readRows(query);
     int rowCount = 0;
     for (Row ignored : rows) {
@@ -145,7 +146,7 @@
   @Test
   public void test6_testDeleteFromColumnFamily() throws IOException {
     String rowKey = "phone#5c10102#20190501";
-    Row row = bigtableDataClient.readRow(TABLE_ID, rowKey);
+    Row row = bigtableDataClient.readRow(TableId.of(TABLE_ID), rowKey);
 
     List<RowCell> cells = row.getCells(COLUMN_FAMILY_NAME_STATS);
     Truth.assertThat(cells).isNotEmpty();
@@ -153,7 +154,7 @@ public void test6_testDeleteFromColumnFamily() throws IOException {
 
     DeleteFromColumnFamilyExample deleteFromColumnFamilyExample =
         new DeleteFromColumnFamilyExample();
     deleteFromColumnFamilyExample.deleteFromColumnFamily(projectId, instanceId, TABLE_ID);
-    row = bigtableDataClient.readRow(TABLE_ID, rowKey);
+    row = bigtableDataClient.readRow(TableId.of(TABLE_ID), rowKey);
 
     List<RowCell> cellsAfterDelete = row.getCells(COLUMN_FAMILY_NAME_STATS);
     Truth.assertThat(cellsAfterDelete).isEmpty();
diff --git a/test-proxy/pom.xml b/test-proxy/pom.xml
index 06c6c0e01d..f07afd5430 100644
--- a/test-proxy/pom.xml
+++ b/test-proxy/pom.xml
@@ -12,11 +12,11 @@
     google-cloud-bigtable-parent
     com.google.cloud
-    2.37.0
+    2.38.0
 
-    2.37.0
+    2.38.0
 
diff --git a/versions.txt b/versions.txt
index 52c4512fef..04bda1fc63 100644
--- a/versions.txt
+++ b/versions.txt
@@ -1,10 +1,10 @@
 # Format:
 # module:released-version:current-version
 
-google-cloud-bigtable:2.37.0:2.37.0
-grpc-google-cloud-bigtable-admin-v2:2.37.0:2.37.0
-grpc-google-cloud-bigtable-v2:2.37.0:2.37.0
-proto-google-cloud-bigtable-admin-v2:2.37.0:2.37.0
-proto-google-cloud-bigtable-v2:2.37.0:2.37.0
-google-cloud-bigtable-emulator:0.174.0:0.174.0
-google-cloud-bigtable-emulator-core:0.174.0:0.174.0
+google-cloud-bigtable:2.38.0:2.38.0
+grpc-google-cloud-bigtable-admin-v2:2.38.0:2.38.0
+grpc-google-cloud-bigtable-v2:2.38.0:2.38.0
+proto-google-cloud-bigtable-admin-v2:2.38.0:2.38.0
+proto-google-cloud-bigtable-v2:2.38.0:2.38.0
+google-cloud-bigtable-emulator:0.175.0:0.175.0
+google-cloud-bigtable-emulator-core:0.175.0:0.175.0