{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "da8095eb", "metadata": {}, "outputs": [], "source": [ "# Not everything from this is used\n", "\n", "import numpy as np\n", "import pandas as pd\n", "from sklearn.datasets import fetch_openml\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.metrics import accuracy_score, log_loss\n", "from sklearn.preprocessing import LabelEncoder, StandardScaler\n", "\n", "import os\n", "import wget\n", "from pathlib import Path\n", "import shutil\n", "import gzip\n", "\n", "from matplotlib import pyplot as plt\n", "\n", "import torch\n", "from pytorch_tabnet.tab_model import TabNetClassifier\n", "\n", "import random\n", "import math" ] }, { "cell_type": "code", "execution_count": null, "id": "ba63a692", "metadata": {}, "outputs": [], "source": [ "# Experiment settings\n", "EPOCHS = 100\n", "DEVICE = \"cuda:0\"\n", "DATAPATH = \"../../../data/loan_tabnet_2f_oob/\"\n", "Path(DATAPATH).mkdir(parents=True, exist_ok=True)\n", "MODELNAME = \"../models/loan-tabnet-2f\"\n", "\n", "# Backdoor settings\n", "target=[\"bad_investment\"]\n", "backdoorFeatures = [\"grade\", \"sub_grade\"]\n", "backdoorTriggerValues = [8, 39]\n", "targetLabel = 0 # Not a bad investment\n", "poisoningRate = 0.01" ] }, { "cell_type": "code", "execution_count": null, "id": "a232d5c2", "metadata": {}, "outputs": [], "source": [ "# Load dataset\n", "data = pd.read_pickle(\"../../../data/LOAN/processed_balanced.pkl\")\n", "data.drop(\"zip_code\", axis=1, inplace=True)" ] }, { "cell_type": "code", "execution_count": null, "id": "028bb8e6", "metadata": {}, "outputs": [], "source": [ "# Setup data\n", "cat_cols = [\n", " \"addr_state\", \"application_type\", \"disbursement_method\",\n", " \"home_ownership\", \"initial_list_status\", \"purpose\", \"term\", \"verification_status\",\n", " #\"zip_code\"\n", "]\n", "\n", "num_cols = [col for col in data.columns.tolist() if col not in cat_cols]\n", "num_cols.remove(target[0])\n", "\n", "feature_columns = (\n", " num_cols + cat_cols + target)\n", "\n", "categorical_columns = []\n", "categorical_dims = {}\n", "for col in cat_cols:\n", " print(col, data[col].nunique())\n", " l_enc = LabelEncoder()\n", " l_enc.fit(data[col].values)\n", " categorical_columns.append(col)\n", " categorical_dims[col] = len(l_enc.classes_)\n", "\n", "unused_feat = []\n", "\n", "features = [ col for col in data.columns if col not in unused_feat+[target]] \n", "\n", "cat_idxs = [ i for i, f in enumerate(features) if f in categorical_columns]\n", "\n", "cat_dims = [ categorical_dims[f] for i, f in enumerate(features) if f in categorical_columns]" ] }, { "cell_type": "code", "execution_count": null, "id": "58971010", "metadata": {}, "outputs": [], "source": [ "# Experiment setup\n", "def GenerateTrigger(df, poisoningRate, backdoorTriggerValues, targetLabel):\n", " rows_with_trigger = df.sample(frac=poisoningRate)\n", " rows_with_trigger[backdoorFeatures] = backdoorTriggerValues\n", " rows_with_trigger[target] = targetLabel\n", " return rows_with_trigger\n", "\n", "def GenerateBackdoorTrigger(df, backdoorTriggerValues, targetLabel):\n", " df[backdoorFeatures] = backdoorTriggerValues\n", " df[target] = targetLabel\n", " return df" ] }, { "cell_type": "code", "execution_count": null, "id": "a21d4d74", "metadata": {}, "outputs": [], "source": [ "# Load dataset\n", "# Changes to output df will not influence input df\n", "train_and_valid, test = train_test_split(data, stratify=data[target[0]], test_size=0.2, random_state=0)\n", "\n", "# Apply 
{ "cell_type": "code", "execution_count": null, "id": "a21d4d74", "metadata": {}, "outputs": [], "source": [ "# Split off a clean test set\n", "# train_test_split returns copies, so changes to the output frames do not influence the input frame\n", "train_and_valid, test = train_test_split(data, stratify=data[target[0]], test_size=0.2, random_state=0)\n", "\n", "# Apply backdoor to train and valid data\n", "np.random.seed(0)  # df.sample draws from NumPy's global RNG, so seed NumPy for reproducible poisoning\n", "train_and_valid_poisoned = GenerateTrigger(train_and_valid, poisoningRate, backdoorTriggerValues, targetLabel)\n", "train_and_valid.update(train_and_valid_poisoned)\n", "\n", "# Create backdoored test version\n", "# Also copy to not disturb clean test data\n", "test_backdoor = test.copy()\n", "\n", "# Drop rows that already have the target label\n", "test_backdoor = test_backdoor[test_backdoor[target[0]] != targetLabel]\n", "\n", "# Add backdoor to all test_backdoor samples\n", "test_backdoor = GenerateBackdoorTrigger(test_backdoor, backdoorTriggerValues, targetLabel)\n", "\n", "# Split dataset into samples and labels\n", "train, valid = train_test_split(train_and_valid, stratify=train_and_valid[target[0]], test_size=0.2, random_state=0)\n", "\n", "X_train = train.drop(target[0], axis=1)\n", "y_train = train[target[0]]\n", "\n", "X_valid = valid.drop(target[0], axis=1)\n", "y_valid = valid[target[0]]\n", "\n", "X_test = test.drop(target[0], axis=1)\n", "y_test = test[target[0]]\n", "\n", "X_test_backdoor = test_backdoor.drop(target[0], axis=1)\n", "y_test_backdoor = test_backdoor[target[0]]" ] },
{ "cell_type": "code", "execution_count": null, "id": "17faf43c", "metadata": {}, "outputs": [], "source": [ "# Save data\n", "outPath = DATAPATH\n", "\n", "X_train.to_pickle(outPath+\"X_train.pkl\")\n", "y_train.to_pickle(outPath+\"y_train.pkl\")\n", "\n", "X_valid.to_pickle(outPath+\"X_valid.pkl\")\n", "y_valid.to_pickle(outPath+\"y_valid.pkl\")\n", "\n", "X_test.to_pickle(outPath+\"X_test.pkl\")\n", "y_test.to_pickle(outPath+\"y_test.pkl\")\n", "\n", "X_test_backdoor.to_pickle(outPath+\"X_test_backdoor.pkl\")\n", "y_test_backdoor.to_pickle(outPath+\"y_test_backdoor.pkl\")\n" ] },
{ "cell_type": "code", "execution_count": null, "id": "6355f58a", "metadata": {}, "outputs": [], "source": [ "# Reload the saved splits so the rest of the notebook runs from the files on disk\n", "X_train = pd.read_pickle(outPath+\"X_train.pkl\")\n", "y_train = pd.read_pickle(outPath+\"y_train.pkl\")\n", "\n", "X_valid = pd.read_pickle(outPath+\"X_valid.pkl\")\n", "y_valid = pd.read_pickle(outPath+\"y_valid.pkl\")\n", "\n", "X_test = pd.read_pickle(outPath+\"X_test.pkl\")\n", "y_test = pd.read_pickle(outPath+\"y_test.pkl\")\n", "\n", "X_test_backdoor = pd.read_pickle(outPath+\"X_test_backdoor.pkl\")\n", "y_test_backdoor = pd.read_pickle(outPath+\"y_test_backdoor.pkl\")" ] },
{ "cell_type": "code", "execution_count": null, "id": "ba7665d6", "metadata": {}, "outputs": [], "source": [ "# Normalization is skipped: it does not impact TabNet, and leaving the raw values\n", "# makes developing a defence easier to follow\n", "#normalizer = StandardScaler()\n", "#normalizer.fit(X_train[num_cols])\n", "\n", "#X_train[num_cols] = normalizer.transform(X_train[num_cols])\n", "#X_valid[num_cols] = normalizer.transform(X_valid[num_cols])\n", "#X_test[num_cols] = normalizer.transform(X_test[num_cols])\n", "#X_test_backdoor[num_cols] = normalizer.transform(X_test_backdoor[num_cols])\n", "\n", "# Create network\n", "clf = TabNetClassifier(\n", "    device_name=DEVICE,\n", "    n_d=64, n_a=64, n_steps=5,\n", "    gamma=1.5, n_independent=2, n_shared=2,\n", "\n", "    momentum=0.3,\n", "    mask_type=\"entmax\",\n", ")\n", "\n", "# Fit network on backdoored data\n", "clf.fit(\n", "    X_train=X_train.values, y_train=y_train.values,\n", "    eval_set=[(X_train.values, y_train.values), (X_valid.values, y_valid.values)],\n", "    eval_name=['train', 'valid'],\n", "    eval_metric=[\"auc\", \"accuracy\"],\n", "    max_epochs=EPOCHS, patience=EPOCHS,\n", "    batch_size=16384, virtual_batch_size=512,\n", "    #num_workers = 0,\n", ")\n", "\n", "# Evaluate the attack:\n", "# ASR = accuracy on the triggered test set (all labels set to the target class), i.e. the attack success rate\n", "y_pred = clf.predict(X_test_backdoor.values)\n", "ASR = accuracy_score(y_pred=y_pred, y_true=y_test_backdoor.values)\n", "\n", "# BA = benign accuracy on the clean test set\n", "y_pred = clf.predict(X_test.values)\n", "BA = accuracy_score(y_pred=y_pred, y_true=y_test.values)\n", "\n", "print(ASR, BA)" ] },
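{ "cell_type": "code", "execution_count": null, "id": "f00dcafe", "metadata": {}, "outputs": [], "source": [ "# Optional inspection (not part of the original pipeline): a minimal sketch, assuming the\n", "# installed pytorch_tabnet version exposes feature_importances_ after fit(), that shows how\n", "# much global importance the trained model assigns to the backdoor features.\n", "importances = pd.Series(clf.feature_importances_, index=X_train.columns)\n", "print(importances.sort_values(ascending=False).head(10))\n", "print(importances[backdoorFeatures])" ] },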
{ "cell_type": "code", "execution_count": null, "id": "8df405c2", "metadata": {}, "outputs": [], "source": [ "# Save the trained model, then reload it to check that the backdoor survives serialization\n", "saved_filename = clf.save_model(MODELNAME)\n", "print(saved_filename)\n", "loaded_clf = TabNetClassifier()\n", "loaded_clf.load_model(saved_filename)" ] },
{ "cell_type": "code", "execution_count": null, "id": "42ac585c", "metadata": {}, "outputs": [], "source": [ "# Evaluate the backdoor again with the reloaded model (ASR and BA should match the values above)\n", "y_pred = loaded_clf.predict(X_test_backdoor.values)\n", "ASR = accuracy_score(y_pred=y_pred, y_true=y_test_backdoor.values)\n", "\n", "y_pred = loaded_clf.predict(X_test.values)\n", "BA = accuracy_score(y_pred=y_pred, y_true=y_test.values)\n", "\n", "print(ASR, BA)" ] },
{ "cell_type": "code", "execution_count": null, "id": "d3385204", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.6" } }, "nbformat": 4, "nbformat_minor": 5 }