Multi-table with text - Airbnb dataset
In the following we present an example script that uses the aindo.rdml library
to generate synthetic data containing both tabular and text columns.
We make use of the Airbnb Open Data dataset, which in its original form consists of a single table. However, after looking at the content of the dataset columns, we find it natural to rearrange the data into two tables:
- A table host, with primary key host_id.
- A table listings, with primary key id and foreign key host_id, referring to the primary key of host.
The columns host_name and calculated_host_listings_count are indeed attributes of the host,
and they are constant across all listings belonging to the same host.
The other columns, by contrast, contain attributes of each particular listing.
In the script, the function preproc_data takes care of this rearrangement,
while postproc_data performs the inverse transformation, joining the two tables into a single one.
Two columns of the original dataset, host_name and name, should be treated as text columns.
After rearranging the data, host_name belongs to the host table, while name belongs to the listings table.
Since the two columns belong to different tables, we need to build and train two text models
on top of the tabular one, which is used to generate the rest of the tabular data.
import json
from pathlib import Path
import pandas as pd
from aindo.rdml.eval import compute_privacy_stats, report
from aindo.rdml.relational import Column, ForeignKey, PrimaryKey, RelationalData, Schema, Table
from aindo.rdml.synth import (
    TabularDataset,
    TabularModel,
    TabularPreproc,
    TabularTrainer,
    TextDataset,
    TextModel,
    TextPreproc,
    TextTrainer,
    Validation,
)
# Data and output
DATA_DIR = Path("path/to/data/dir")
OUTPUT_DIR = Path("./output")
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)  # Make sure the output directory exists before writing to it
# Model settings
MODEL_SIZE_TAB = "small"
MODEL_SIZE_TEXT = "medium"
DEVICE = None  # If None, the device is set to CUDA when available, and to CPU otherwise
# Training settings
N_EPOCHS = 1_000  # Exactly one of N_EPOCHS and N_STEPS should be an integer; the other should be None
N_STEPS = None
MEMORY = 4096
VALID_EACH = 200
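# MEMORY is the memory budget made available to the trainer, which uses it to pick a
# suitable batch size (presumably in MB; see the aindo.rdml docs for the exact unit).
# VALID_EACH is used below with trigger="step", i.e. validation runs every 200 training steps.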
def preproc_data(df: pd.DataFrame, schema: Schema) -> dict[str, pd.DataFrame]:
    """Split the Airbnb dataset into two tables: host (parent) and listings (child)."""
    return {
        # Host attributes are repeated on every listing of the same host, so deduplicate them
        "host": df.loc[:, list(schema.tables["host"].all_columns)].drop_duplicates(),
        "listings": df.loc[:, list(schema.tables["listings"].all_columns)],
    }
def postproc_data(data: RelationalData) -> pd.DataFrame:
    """Join the host and listings tables along the foreign key to recover the original format of the Airbnb dataset."""
    return data["host"].merge(data["listings"], on="host_id")
# Load data and define schema
df = pd.read_csv(DATA_DIR / "airbnb.csv")
schema = Schema(
    host=Table(
        host_id=PrimaryKey(),
        host_name=Column.TEXT,
        calculated_host_listings_count=Column.NUMERIC,
    ),
    listings=Table(
        id=PrimaryKey(),
        host_id=ForeignKey(parent="host"),
        name=Column.TEXT,
        neighbourhood_group=Column.CATEGORICAL,
        neighbourhood=Column.CATEGORICAL,
        latitude=Column.NUMERIC,
        longitude=Column.NUMERIC,
        room_type=Column.CATEGORICAL,
        price=Column.INTEGER,
        minimum_nights=Column.INTEGER,
        number_of_reviews=Column.INTEGER,
        last_review=Column.DATETIME,
        reviews_per_month=Column.NUMERIC,
        availability_365=Column.INTEGER,
    ),
)
data = preproc_data(df=df, schema=schema)
data = RelationalData(data=data, schema=schema)
# Split data
split_ratio = 0.1
data_train_valid, data_test = data.split(ratio=split_ratio)
data_train, data_valid = data_train_valid.split(ratio=split_ratio)
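# With ratio=0.1, the first split holds out 10% of the data as the test set, and the
# second split carves a validation set out of the remaining 90% (assuming the second
# value returned by split() is the held-out fraction).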
# Tabular
# Define the tabular preprocessor
preproc = TabularPreproc.from_schema(schema=schema).fit(data=data)
# Build the tabular datasets
dataset_train = TabularDataset.from_data(data=data_train, preproc=preproc, on_disk=True)
dataset_valid = TabularDataset.from_data(data=data_valid, preproc=preproc, on_disk=True)
# Build the tabular model
model = TabularModel.build(preproc=preproc, size=MODEL_SIZE_TAB)
model.device = DEVICE
# Train the tabular model
TabularTrainer(model=model).train(
    dataset=dataset_train,
    n_epochs=N_EPOCHS,
    n_steps=N_STEPS,
    memory=MEMORY,
    valid=Validation(
        dataset=dataset_valid,
        early_stop="normal",
        save_best=OUTPUT_DIR / "ckpt" / "tabular.pt",
        tensorboard=OUTPUT_DIR / "tb" / "tabular",
        each=VALID_EACH,
        trigger="step",
    ),
)
# Generate synthetic tabular data
data_synth = model.generate(
    n_samples=data["host"].shape[0],
    batch_size=1024,
)
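# n_samples is set to the number of hosts in the real data, so that the synthetic
# dataset contains as many rows in the root table (host) as the original one.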
# Compute and print PDF report
report(
    data_train=data_train,
    data_test=data_test,
    data_synth=data_synth,
    path=OUTPUT_DIR / "report.pdf",
)
# Compute extra privacy stats and print some results
privacy_stats = compute_privacy_stats(
    data_train=data_train,
    data_synth=data_synth,
)
privacy_stats_out = {
    t: {
        "privacy_score": ps.privacy_score,
        "privacy_score_std": ps.privacy_score_std,
        "%_points_at_risk": ps.risk * 100,
    }
    for t, ps in privacy_stats.items()
}
with open(OUTPUT_DIR / "privacy_stats.json", mode="w", encoding="utf-8") as f:
    json.dump(privacy_stats_out, f)
# Text (host)
# Define the text preprocessor
preproc_text_host = TextPreproc.from_tabular(preproc=preproc, table="host").fit(data=data)
# Build the text datasets
dataset_text_host_train = TextDataset.from_data(data=data_train, preproc=preproc_text_host, on_disk=True)
dataset_text_host_valid = TextDataset.from_data(data=data_valid, preproc=preproc_text_host, on_disk=True)
# Build the text model
model_text_host = TextModel.build(
    preproc=preproc_text_host,
    size=MODEL_SIZE_TEXT,
    block_size=max(dataset_text_host_train.max_text_len, dataset_text_host_valid.max_text_len),
)
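# The block size is set to the longest host_name in either split, so that (assuming
# block_size is the maximum text length the model can handle) no example is truncated.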
model_text_host.device = DEVICE
# Train the text model
TextTrainer(model=model_text_host).train(
    dataset=dataset_text_host_train,
    n_epochs=N_EPOCHS,
    n_steps=N_STEPS,
    memory=MEMORY,
    valid=Validation(
        dataset=dataset_text_host_valid,
        early_stop="normal",
        save_best=OUTPUT_DIR / "ckpt" / "text_host.pt",
        tensorboard=OUTPUT_DIR / "tb" / "text_host",
        each=VALID_EACH,
        trigger="step",
    ),
)
# Generate synthetic text data for table host
data_synth = model_text_host.generate(
    data=data_synth,
    batch_size=512,
)
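# This fills in the host_name text column of the synthetic data generated by the
# tabular model, completing the host table.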
# Text (listings)
# Define preprocessor
preproc_text_listings = TextPreproc.from_tabular(preproc=preproc, table="listings").fit(data=data)
# Build datasets
dataset_text_listings_train = TextDataset.from_data(data=data_train, preproc=preproc_text_listings, on_disk=True)
dataset_text_listings_valid = TextDataset.from_data(data=data_valid, preproc=preproc_text_listings, on_disk=True)
# Build model
model_text_listings = TextModel.build(
    preproc=preproc_text_listings,
    size=MODEL_SIZE_TEXT,
    block_size=max(dataset_text_listings_train.max_text_len, dataset_text_listings_valid.max_text_len),
)
model_text_listings.device = DEVICE
# Train the model
TextTrainer(model=model_text_listings).train(
    dataset=dataset_text_listings_train,
    n_epochs=N_EPOCHS,
    n_steps=N_STEPS,
    memory=MEMORY,
    valid=Validation(
        dataset=dataset_text_listings_valid,
        early_stop="normal",
        save_best=OUTPUT_DIR / "ckpt" / "text_listings.pt",
        tensorboard=OUTPUT_DIR / "tb" / "text_listings",
        each=VALID_EACH,
        trigger="step",
    ),
)
# Generate synthetic text data for table listings
data_synth = model_text_listings.generate(
    data=data_synth,
    batch_size=512,
)
# Output
# Save the synthetic data
synth_dir = OUTPUT_DIR / "synth"
synth_dir.mkdir(parents=True, exist_ok=True)  # Make sure the directory exists before writing the CSV files
data_synth.to_csv(synth_dir, escapechar="\\")
# Revert to the original form
df_synth = postproc_data(data=data_synth).loc[:, df.columns]
df_synth.to_csv(synth_dir / "airbnb.csv", index=False, escapechar="\\")
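At the end of the run, the output directory contains the model checkpoints (ckpt/), the TensorBoard logs (tb/), the evaluation report (report.pdf), the extra privacy statistics (privacy_stats.json), and the synthetic data, both as separate host and listings tables (in synth/) and re-joined into the original single-table format (synth/airbnb.csv).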