
Commit 83df285

feat: Rename Travel Sustainability to Travel Impact Model (#351)
1 parent 03b4f89 commit 83df285


11 files changed: +113 / -82 lines changed


datasets/travel_sustainability/infra/flight_emissions_pipeline.tf renamed to datasets/travel_impact_model/infra/flights_impact_data_pipeline.tf

Lines changed: 9 additions & 9 deletions
@@ -15,20 +15,20 @@
  */


-resource "google_bigquery_table" "travel_sustainability_flight_emissions" {
+resource "google_bigquery_table" "travel_impact_model_flights_impact_data" {
   project     = var.project_id
-  dataset_id  = "travel_sustainability"
-  table_id    = "flight_emissions"
-  description = "Flight emissions data"
+  dataset_id  = "travel_impact_model"
+  table_id    = "flights_impact_data"
+  description = "Flights impact data"
   depends_on = [
-    google_bigquery_dataset.travel_sustainability
+    google_bigquery_dataset.travel_impact_model
   ]
 }

-output "bigquery_table-travel_sustainability_flight_emissions-table_id" {
-  value = google_bigquery_table.travel_sustainability_flight_emissions.table_id
+output "bigquery_table-travel_impact_model_flights_impact_data-table_id" {
+  value = google_bigquery_table.travel_impact_model_flights_impact_data.table_id
 }

-output "bigquery_table-travel_sustainability_flight_emissions-id" {
-  value = google_bigquery_table.travel_sustainability_flight_emissions.id
+output "bigquery_table-travel_impact_model_flights_impact_data-id" {
+  value = google_bigquery_table.travel_impact_model_flights_impact_data.id
 }

datasets/travel_sustainability/infra/metadata_pipeline.tf renamed to datasets/travel_impact_model/infra/metadata_pipeline.tf

Lines changed: 7 additions & 7 deletions
@@ -15,20 +15,20 @@
  */


-resource "google_bigquery_table" "travel_sustainability_metadata" {
+resource "google_bigquery_table" "travel_impact_model_metadata" {
   project     = var.project_id
-  dataset_id  = "travel_sustainability"
+  dataset_id  = "travel_impact_model"
   table_id    = "metadata"
   description = "Metadata about the dataset"
   depends_on = [
-    google_bigquery_dataset.travel_sustainability
+    google_bigquery_dataset.travel_impact_model
   ]
 }

-output "bigquery_table-travel_sustainability_metadata-table_id" {
-  value = google_bigquery_table.travel_sustainability_metadata.table_id
+output "bigquery_table-travel_impact_model_metadata-table_id" {
+  value = google_bigquery_table.travel_impact_model_metadata.table_id
 }

-output "bigquery_table-travel_sustainability_metadata-id" {
-  value = google_bigquery_table.travel_sustainability_metadata.id
+output "bigquery_table-travel_impact_model_metadata-id" {
+  value = google_bigquery_table.travel_impact_model_metadata.id
 }
Lines changed: 70 additions & 0 deletions
@@ -0,0 +1,70 @@
+/**
+ * Copyright 2021 Google LLC
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+resource "google_bigquery_dataset" "travel_impact_model" {
+  dataset_id  = "travel_impact_model"
+  project     = var.project_id
+  description = "Travel Impact Model Data"
+}
+
+data "google_iam_policy" "bq_ds__travel_impact_model" {
+  dynamic "binding" {
+    for_each = var.iam_policies["bigquery_datasets"]["travel_impact_model"]
+    content {
+      role    = binding.value["role"]
+      members = binding.value["members"]
+    }
+  }
+}
+
+resource "google_bigquery_dataset_iam_policy" "travel_impact_model" {
+  dataset_id  = google_bigquery_dataset.travel_impact_model.dataset_id
+  policy_data = data.google_iam_policy.bq_ds__travel_impact_model.policy_data
+}
+output "bigquery_dataset-travel_impact_model-dataset_id" {
+  value = google_bigquery_dataset.travel_impact_model.dataset_id
+}
+
+resource "google_storage_bucket" "travel-impact-model" {
+  name                        = "${var.bucket_name_prefix}-travel-impact-model"
+  force_destroy               = true
+  location                    = "US"
+  uniform_bucket_level_access = true
+  lifecycle {
+    ignore_changes = [
+      logging,
+    ]
+  }
+}
+
+data "google_iam_policy" "storage_bucket__travel-impact-model" {
+  dynamic "binding" {
+    for_each = var.iam_policies["storage_buckets"]["travel-impact-model"]
+    content {
+      role    = binding.value["role"]
+      members = binding.value["members"]
+    }
+  }
+}
+
+resource "google_storage_bucket_iam_policy" "travel-impact-model" {
+  bucket      = google_storage_bucket.travel-impact-model.name
+  policy_data = data.google_iam_policy.storage_bucket__travel-impact-model.policy_data
+}
+output "storage_bucket-travel-impact-model-name" {
+  value = google_storage_bucket.travel-impact-model.name
+}
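
The new infra file above provisions the travel_impact_model BigQuery dataset and a bucket named "${var.bucket_name_prefix}-travel-impact-model". A minimal post-apply spot check, assuming placeholder project and prefix values (neither appears in this commit), could look like:

# Minimal sketch, not part of this commit: confirm the Terraform-managed
# resources are reachable. Project ID and bucket prefix are placeholder assumptions.
from google.cloud import bigquery, storage

project = "your-gcp-project"          # assumption: your own project ID
bucket_prefix = "your-bucket-prefix"  # assumption: value of var.bucket_name_prefix

bigquery.Client(project=project).get_dataset("travel_impact_model")
storage.Client(project=project).get_bucket(f"{bucket_prefix}-travel-impact-model")
print("travel_impact_model dataset and bucket are reachable")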

datasets/travel_sustainability/infra/variables.tf renamed to datasets/travel_impact_model/infra/variables.tf

Lines changed: 3 additions & 0 deletions
@@ -20,4 +20,7 @@ variable "bucket_name_prefix" {}
 variable "impersonating_acct" {}
 variable "region" {}
 variable "env" {}
+variable "iam_policies" {
+  default = {}
+}

datasets/travel_sustainability/pipelines/dataset.yaml renamed to datasets/travel_impact_model/pipelines/dataset.yaml

Lines changed: 4 additions & 4 deletions
@@ -17,7 +17,7 @@ dataset:
   # to users of your data on the Google Cloud website.

   # Must be exactly the same name as the folder name your dataset.yaml is in.
-  name: travel_sustainability
+  name: travel_impact_model

   # A friendly, human-readable name of the dataset
   friendly_name: ~
@@ -52,8 +52,8 @@ resources:
     # friendly_name (A user-friendly name of the dataset)
     # description (A user-friendly description of the dataset)
     # location (The geographic location where the dataset should reside)
-    dataset_id: travel_sustainability
-    description: "Travel Sustainability"
+    dataset_id: travel_impact_model
+    description: "Travel Impact Model Data"

   - type: storage_bucket
     # Google Cloud Storage Bucket that your pipelines need. Say, you need an
@@ -78,6 +78,6 @@
     # Optional Properties:
     # location
     # uniform_bucket_level_access (we suggest False for fine-grained access)
-    name: travel-sustainability
+    name: travel-impact-model
     location: US
     uniform_bucket_level_access: true

datasets/travel_sustainability/pipelines/flight_emissions/flight_emissions_dag.py renamed to datasets/travel_impact_model/pipelines/flights_impact_data/flights_impact_data_dag.py

Lines changed: 7 additions & 7 deletions
@@ -24,7 +24,7 @@


 with DAG(
-    dag_id="travel_sustainability.flight_emissions",
+    dag_id="travel_impact_model.flights_impact_data",
     default_args=default_args,
     max_active_runs=1,
     schedule_interval="0 15 * * *",
@@ -33,12 +33,12 @@
 ) as dag:

     # Task to load CSV data to a BigQuery table
-    flight_emissions_gcs_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
-        task_id="flight_emissions_gcs_to_bq",
-        bucket="{{ var.json.travel_sustainability.source_bucket }}",
-        source_objects=["flight_emissions.csv"],
+    flights_impact_data_gcs_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
+        task_id="flights_impact_data_gcs_to_bq",
+        bucket="{{ var.json.travel_impact_model.source_bucket }}",
+        source_objects=["flights_impact_data.csv"],
         source_format="CSV",
-        destination_project_dataset_table="travel_sustainability.flight_emissions",
+        destination_project_dataset_table="travel_impact_model.flights_impact_data",
         skip_leading_rows=1,
         write_disposition="WRITE_TRUNCATE",
         schema_fields=[
@@ -123,4 +123,4 @@
         ],
     )

-    flight_emissions_gcs_to_bq
+    flights_impact_data_gcs_to_bq
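
The renamed Jinja reference "{{ var.json.travel_impact_model.source_bucket }}" means the Airflow environment needs a Variable named travel_impact_model (replacing travel_sustainability) whose JSON value carries a source_bucket key. A minimal sketch of registering it, with a placeholder bucket name:

# Minimal sketch, not part of this commit: set the Airflow Variable that the
# renamed DAG template reads. The bucket name is a placeholder assumption.
from airflow.models import Variable

Variable.set(
    "travel_impact_model",
    {"source_bucket": "example-travel-impact-model-source"},
    serialize_json=True,
)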

datasets/travel_sustainability/pipelines/flight_emissions/pipeline.yaml renamed to datasets/travel_impact_model/pipelines/flights_impact_data/pipeline.yaml

Lines changed: 8 additions & 8 deletions
@@ -25,15 +25,15 @@ resources:
     # A Google BigQuery table to store your data. Requires a `bigquery_dataset`
     # to be specified in the config (i.e. `dataset.yaml) for the dataset that
     # this pipeline belongs in.
-    table_id: flight_emissions
-    description: "Flight emissions data"
+    table_id: flights_impact_data
+    description: "Flights impact data"

 dag:
   # [Required] Specify the Airflow version of the operators used by the DAG.
   airflow_version: 2

   initialize:
-    dag_id: flight_emissions
+    dag_id: flights_impact_data
     default_args:
       owner: "Google"
       depends_on_past: False
@@ -54,15 +54,15 @@ dag:
       # Arguments supported by this operator:
      # http://airflow.apache.org/docs/apache-airflow/stable/howto/operator/gcp/gcs.html#googlecloudstoragetobigqueryoperator
       args:
-        task_id: "flight_emissions_gcs_to_bq"
+        task_id: "flights_impact_data_gcs_to_bq"

         # The GCS bucket where the CSV file is located in.
-        bucket: "{{ var.json.travel_sustainability.source_bucket }}"
+        bucket: "{{ var.json.travel_impact_model.source_bucket }}"

         # Use the CSV file containing data from today
-        source_objects: ["flight_emissions.csv"]
+        source_objects: ["flights_impact_data.csv"]
         source_format: "CSV"
-        destination_project_dataset_table: "travel_sustainability.flight_emissions"
+        destination_project_dataset_table: "travel_impact_model.flights_impact_data"

         # Use this if your CSV file contains a header row
         skip_leading_rows: 1
@@ -126,4 +126,4 @@ dag:
           description: "Estimated CO2 in grams for one passenger in first cabin excluding non-CO2 effects"

   graph_paths:
-    - "flight_emissions_gcs_to_bq"
+    - "flights_impact_data_gcs_to_bq"

datasets/travel_sustainability/pipelines/metadata/metadata_dag.py renamed to datasets/travel_impact_model/pipelines/metadata/metadata_dag.py

Lines changed: 3 additions & 3 deletions
@@ -24,7 +24,7 @@


 with DAG(
-    dag_id="travel_sustainability.metadata",
+    dag_id="travel_impact_model.metadata",
     default_args=default_args,
     max_active_runs=1,
     schedule_interval="0 15 * * *",
@@ -35,10 +35,10 @@
     # Task to load CSV data to a BigQuery table
     metadata_gcs_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
         task_id="metadata_gcs_to_bq",
-        bucket="{{ var.json.travel_sustainability.source_bucket }}",
+        bucket="{{ var.json.travel_impact_model.source_bucket }}",
         source_objects=["metadata.csv"],
         source_format="CSV",
-        destination_project_dataset_table="travel_sustainability.metadata",
+        destination_project_dataset_table="travel_impact_model.metadata",
         skip_leading_rows=1,
         write_disposition="WRITE_TRUNCATE",
         schema_fields=[

datasets/travel_sustainability/pipelines/metadata/pipeline.yaml renamed to datasets/travel_impact_model/pipelines/metadata/pipeline.yaml

Lines changed: 2 additions & 2 deletions
@@ -57,12 +57,12 @@ dag:
         task_id: "metadata_gcs_to_bq"

         # The GCS bucket where the CSV file is located in.
-        bucket: "{{ var.json.travel_sustainability.source_bucket }}"
+        bucket: "{{ var.json.travel_impact_model.source_bucket }}"

         # Use the CSV file containing data from today
         source_objects: ["metadata.csv"]
         source_format: "CSV"
-        destination_project_dataset_table: "travel_sustainability.metadata"
+        destination_project_dataset_table: "travel_impact_model.metadata"

         # Use this if your CSV file contains a header row
         skip_leading_rows: 1

datasets/travel_sustainability/infra/travel_sustainability_dataset.tf

Lines changed: 0 additions & 42 deletions
This file was deleted.

0 commit comments
