Feat: Onboard IMDB Dataset (#382)
* committing dataset.yaml & pipeline.yaml for imdb_reviews dataset

* removed task2 comments

* checking airflow bash_operator basic commands

* reverting to the first committed version of pipeline.yaml

* committing transform, pipeline, dataset files Version_1

* debug-1 save to file ','

* debug-1 save to file ','

* Feat: Onboard IMDb Dataset

* fixed yamllint errors. Production ready.

* yamllint check for transform (py file).

* Passed Flake8 check.

* Resolving pre-commit hook changes

* fix: resolved black hook issue
vijay-google committed Jun 16, 2022
1 parent 9809935 commit 8bf7065
Showing 10 changed files with 516 additions and 0 deletions.
26 changes: 26 additions & 0 deletions datasets/imdb/infra/imdb_dataset.tf
@@ -0,0 +1,26 @@
/**
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/


resource "google_bigquery_dataset" "imdb" {
dataset_id = "imdb"
project = var.project_id
description = "aclImdb_v1 dataset"
}

output "bigquery_dataset-imdb-dataset_id" {
value = google_bigquery_dataset.imdb.dataset_id
}
28 changes: 28 additions & 0 deletions datasets/imdb/infra/provider.tf
@@ -0,0 +1,28 @@
/**
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/


provider "google" {
project = var.project_id
impersonate_service_account = var.impersonating_acct
region = var.region
}

data "google_client_openid_userinfo" "me" {}

output "impersonating-account" {
value = data.google_client_openid_userinfo.me.email
}
34 changes: 34 additions & 0 deletions datasets/imdb/infra/reviews_pipeline.tf
@@ -0,0 +1,34 @@
/**
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/


resource "google_bigquery_table" "imdb_reviews" {
project = var.project_id
dataset_id = "imdb"
table_id = "reviews"
description = "Reviews table"
depends_on = [
google_bigquery_dataset.imdb
]
}

output "bigquery_table-imdb_reviews-table_id" {
value = google_bigquery_table.imdb_reviews.table_id
}

output "bigquery_table-imdb_reviews-id" {
value = google_bigquery_table.imdb_reviews.id
}
26 changes: 26 additions & 0 deletions datasets/imdb/infra/variables.tf
@@ -0,0 +1,26 @@
/**
* Copyright 2021 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/


variable "project_id" {}
variable "bucket_name_prefix" {}
variable "impersonating_acct" {}
variable "region" {}
variable "env" {}
variable "iam_policies" {
default = {}
}

37 changes: 37 additions & 0 deletions datasets/imdb/pipelines/_images/run_csv_transform_kub/Dockerfile
@@ -0,0 +1,37 @@
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The base image for this build
FROM python:3.8

# Allow statements and log messages to appear in Cloud logs
ENV PYTHONUNBUFFERED True

# Copy the requirements file into the image
COPY requirements.txt ./

# Install the packages specified in the requirements file
RUN python3 -m pip install --no-cache-dir -r requirements.txt

# The WORKDIR instruction sets the working directory for any RUN, CMD,
# ENTRYPOINT, COPY and ADD instructions that follow it in the Dockerfile.
# If the WORKDIR doesn’t exist, it will be created even if it’s not used in
# any subsequent Dockerfile instruction
WORKDIR /custom

# Copy the specific data processing script/s in the image under /custom/*
COPY ./csv_transform.py .

# Command to run the data processing script when the container is run
CMD ["python3", "csv_transform.py"]
171 changes: 171 additions & 0 deletions datasets/imdb/pipelines/_images/run_csv_transform_kub/csv_transform.py
@@ -0,0 +1,171 @@
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import glob
import json
import logging
import os
import pathlib
import tarfile
import typing

import pandas as pd
import requests
from google.cloud import storage


def main(
source_url: str,
source_file: pathlib.Path,
extract_here: pathlib.Path,
target_file: pathlib.Path,
target_gcs_bucket: str,
target_gcs_path: str,
headers: typing.List[str],
rename_mappings: dict,
pipeline_name: str,
) -> None:
logging.info(
f"IMDb Dataset {pipeline_name} pipeline process started at "
+ str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
)

logging.info("Downloading tar file ...")
download_tarfile(source_url, source_file)
logging.info("Downloading Completed.")

logging.info(f"Extracting tar file to {extract_here}.")
extract_tar(source_file, extract_here)
logging.info(f"Successfully extracted tar file to {extract_here}.")

logging.info("Started creating Dataframe.")
df = create_dataframe(extract_here, headers)
logging.info("Successfully Created Dataframe and assigned to variable df.")

logging.info("Started cleaning html tags from the user review.")
clean_html_tags(df)
logging.info("Cleaning html tags completed.")

logging.info(
'Changing "label" column data from ["neg", "pos"] --> ["Negative", "Positive"].'
)
change_label(df)
logging.info('Successfully replaced "label" column data.')

logging.info("Renaming headers")
rename_headers(df, rename_mappings)

logging.info(f"Saving to output file... {target_file}")
try:
save_to_new_file(df, target_file)
logging.info("Successfully saved.")
except Exception as e:
logging.error(f"Error saving output file: {e}.")

logging.info(
f"Uploading output file to.. gs://{target_gcs_bucket}/{target_gcs_path}"
)
upload_file_to_gcs(target_file, target_gcs_bucket, target_gcs_path)
logging.info("Successfully uploaded file to gcs bucket.")

logging.info(
f"IMDb Dataset {pipeline_name} pipeline process completed at "
+ str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
)


def download_tarfile(source_url: str, source_file: pathlib.Path) -> None:
logging.info(f"Creating 'files' folder under {os.getcwd()}")
pathlib.Path("./files").mkdir(parents=True, exist_ok=True)
logging.info(f"Downloading file from {source_url}...")
logging.info(f"Downloading {source_url} into {source_file}")
res = requests.get(source_url, stream=True)
if res.status_code == 200:
with open(source_file, "wb") as fb:
for chunk in res:
fb.write(chunk)
else:
logging.error(f"Couldn't download {source_url}: {res.text}")


def extract_tar(source_file: pathlib.Path, extract_here: pathlib.Path) -> None:
with tarfile.open(str(source_file), "r") as tar_fb:
tar_fb.extractall(extract_here)


def create_dataframe(
extract_here: pathlib.Path, headers: typing.List[str]
) -> pd.DataFrame:
df = pd.DataFrame(columns=headers)
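    # The extracted archive is laid out as aclImdb/{train,test}/{pos,neg}/*.txt,
    # one review per file; the folder name carries the sentiment label.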
for parent in ["train", "test"]:
for child in ["pos", "neg"]:
path = f"{extract_here}/aclImdb/{parent}/{child}/"
            txt_files = list(glob.glob(path + "*.txt"))
            logging.info(
                f"\tCreating child Dataframe by reading files from {parent}-->{child}."
            )
            df_child = pd.DataFrame(
                # Each row is [review text, folder-derived label]; read_text()
                # also closes every review file after reading it.
                [
                    [pathlib.Path(file).read_text(), file.split("/")[-2]]
                    for file in txt_files
                ],
                columns=headers,
            )
            logging.info(
                f"\tSuccessfully created child Dataframe for {parent}-->{child}."
            )
            logging.info(
                f"\tConcatenating main Dataframe & child Dataframe for {parent}-->{child}."
            )
            df = pd.concat([df, df_child], ignore_index=True)
            logging.info("\tChild Dataframe concatenated with main Dataframe df.")
return df


def clean_html_tags(df: pd.DataFrame) -> None:
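    # Strip short markup tags such as <br /> from the review text: the pattern
    # matches a run of '<' followed by up to four characters and a closing '>'.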
df.review.replace(to_replace="<{1,}.{0,4}>", value="", regex=True, inplace=True)


def change_label(df: pd.DataFrame) -> None:
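    # Map the folder-derived labels ("neg"/"pos") to human-readable values.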
df.label.replace({"neg": "Negative", "pos": "Positive"}, inplace=True)


def rename_headers(df: pd.DataFrame, rename_mappings: dict) -> None:
df.rename(columns=rename_mappings, inplace=True)


def save_to_new_file(df: pd.DataFrame, target_file: pathlib.Path) -> None:
df.to_csv(str(target_file), header=True, index=False)


def upload_file_to_gcs(
target_file: pathlib.Path, target_gcs_bucket: str, target_gcs_path: str
) -> None:
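    # storage.Client() authenticates via Application Default Credentials;
    # the target bucket must already exist.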
storage_client = storage.Client()
bucket = storage_client.bucket(target_gcs_bucket)
blob = bucket.blob(target_gcs_path)
blob.upload_from_filename(target_file)


if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
main(
source_url=os.environ["SOURCE_URL"],
source_file=pathlib.Path(os.environ["SOURCE_FILE"]).expanduser(),
extract_here=pathlib.Path(os.environ["EXTRACT_HERE"]).expanduser(),
target_file=pathlib.Path(os.environ["TARGET_FILE"]).expanduser(),
target_gcs_bucket=os.environ["TARGET_GCS_BUCKET"],
target_gcs_path=os.environ["TARGET_GCS_PATH"],
headers=json.loads(os.environ["CSV_HEADERS"]),
rename_mappings=json.loads(os.environ["RENAME_MAPPINGS"]),
pipeline_name=os.environ["PIPELINE_NAME"],
)
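
For local testing, the transform can be driven end to end through the same environment variables the container receives. Below is a minimal local-run sketch; the bucket name and rename mapping are hypothetical placeholders, and the source URL is assumed to be the standard aclImdb_v1 download location:

# local_run.py -- minimal local-run sketch for csv_transform.py.
# All values below are illustrative; the real ones come from the pipeline config.
import os
import runpy

os.environ.update(
    {
        "SOURCE_URL": "https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz",
        "SOURCE_FILE": "./files/aclImdb_v1.tar.gz",
        "EXTRACT_HERE": "./files",
        "TARGET_FILE": "./files/data_output.csv",
        "TARGET_GCS_BUCKET": "my-test-bucket",  # hypothetical bucket
        "TARGET_GCS_PATH": "data/imdb/reviews/data_output.csv",
        "CSV_HEADERS": '["review", "label"]',  # the column names the transform expects
        "RENAME_MAPPINGS": '{"label": "sentiment"}',  # hypothetical rename
        "PIPELINE_NAME": "reviews",
    }
)

# Execute the script exactly as `python3 csv_transform.py` would.
runpy.run_path("csv_transform.py", run_name="__main__")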
3 changes: 3 additions & 0 deletions datasets/imdb/pipelines/_images/run_csv_transform_kub/requirements.txt
@@ -0,0 +1,3 @@
google-cloud-storage
pandas
requests
24 changes: 24 additions & 0 deletions datasets/imdb/pipelines/dataset.yaml
@@ -0,0 +1,24 @@
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
dataset:
name: imdb
friendly_name: imdb
description: IMDb reviews dataset
dataset_sources: ~
terms_of_use: "When using this dataset, please cite our ACL 2011 paper [ https://ai.stanford.edu/~amaas/papers/wvSent_acl2011.bib ]."

resources:
- type: bigquery_dataset
dataset_id: imdb
description: "aclImdb_v1 dataset"
