{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "769381d2", "metadata": {}, "outputs": [], "source": [ "import numpy as np\n", "import pandas as pd\n", "from sklearn.datasets import fetch_openml\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.metrics import accuracy_score, log_loss\n", "from sklearn.preprocessing import LabelEncoder\n", "\n", "import os\n", "import wget\n", "from pathlib import Path\n", "import shutil\n", "import gzip\n", "\n", "from matplotlib import pyplot as plt\n", "import matplotlib.ticker as mtick\n", "\n", "import torch\n", "import torch.nn as nn\n", "import torch.nn.functional as F\n", "import torch.nn.init as nn_init\n", "import torch.nn.utils.prune as prune\n", "\n", "import random\n", "import math\n", "\n", "from FTtransformer.ft_transformer import Tokenizer, MultiheadAttention, Transformer, FTtransformer\n", "from FTtransformer import lib\n", "import zero\n", "import json\n", "\n", "from functools import partial\n", "import pickle" ] }, { "cell_type": "markdown", "id": "5b9860e4", "metadata": {}, "source": [ "## Setup" ] }, { "cell_type": "code", "execution_count": null, "id": "d575b960", "metadata": {}, "outputs": [], "source": [ "# Experiment settings\n", "EPOCHS = 50\n", "\n", "# Backdoor settings\n", "target=[\"Covertype\"]\n", "backdoorFeatures = [\"Elevation\"]\n", "backdoorTriggerValues = [4057]\n", "targetLabel = 4\n", "poisoningRates = [0.0005]\n", "\n", "DEVICE = 'cuda:0'\n", "DATAPATH = \"data/covtypeFTT-1F-OOB/\"\n", "# FTtransformer config\n", "config = {\n", " 'data': {\n", " 'normalization': 'standard',\n", " 'path': DATAPATH\n", " }, \n", " 'model': {\n", " 'activation': 'reglu', \n", " 'attention_dropout': 0.03815883962184247, \n", " 'd_ffn_factor': 1.333333333333333, \n", " 'd_token': 424, \n", " 'ffn_dropout': 0.2515503440562596, \n", " 'initialization': 'kaiming', \n", " 'n_heads': 8, \n", " 'n_layers': 2, \n", " 'prenormalization': True, \n", " 'residual_dropout': 0.0, \n", " 'token_bias': True, \n", " 'kv_compression': None, \n", " 'kv_compression_sharing': None\n", " }, \n", " 'seed': 0, \n", " 'training': {\n", " 'batch_size': 1024, \n", " 'eval_batch_size': 1024, \n", " 'lr': 3.762989816330166e-05, \n", " 'n_epochs': EPOCHS, \n", " 'device': DEVICE, \n", " 'optimizer': 'adamw', \n", " 'patience': 16, \n", " 'weight_decay': 0.0001239780004929955\n", " }\n", "}\n", "\n", "\n", "# Load dataset\n", "url = \"https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz\"\n", "dataset_name = 'forestcover-type'\n", "tmp_out = Path('./data/'+dataset_name+'.gz')\n", "out = Path(os.getcwd()+'/data/'+dataset_name+'.csv')\n", "out.parent.mkdir(parents=True, exist_ok=True)\n", "if out.exists():\n", " print(\"File already exists.\")\n", "else:\n", " print(\"Downloading file...\")\n", " wget.download(url, tmp_out.as_posix())\n", " with gzip.open(tmp_out, 'rb') as f_in:\n", " with open(out, 'wb') as f_out:\n", " shutil.copyfileobj(f_in, f_out)\n", "\n", "\n", "# Setup data\n", "cat_cols = [\n", " \"Wilderness_Area1\", \"Wilderness_Area2\", \"Wilderness_Area3\",\n", " \"Wilderness_Area4\", \"Soil_Type1\", \"Soil_Type2\", \"Soil_Type3\", \"Soil_Type4\",\n", " \"Soil_Type5\", \"Soil_Type6\", \"Soil_Type7\", \"Soil_Type8\", \"Soil_Type9\",\n", " \"Soil_Type10\", \"Soil_Type11\", \"Soil_Type12\", \"Soil_Type13\", \"Soil_Type14\",\n", " \"Soil_Type15\", \"Soil_Type16\", \"Soil_Type17\", \"Soil_Type18\", \"Soil_Type19\",\n", " \"Soil_Type20\", \"Soil_Type21\", \"Soil_Type22\", \"Soil_Type23\", 
\"Soil_Type24\",\n", " \"Soil_Type25\", \"Soil_Type26\", \"Soil_Type27\", \"Soil_Type28\", \"Soil_Type29\",\n", " \"Soil_Type30\", \"Soil_Type31\", \"Soil_Type32\", \"Soil_Type33\", \"Soil_Type34\",\n", " \"Soil_Type35\", \"Soil_Type36\", \"Soil_Type37\", \"Soil_Type38\", \"Soil_Type39\",\n", " \"Soil_Type40\"\n", "]\n", "\n", "num_cols = [\n", " \"Elevation\", \"Aspect\", \"Slope\", \"Horizontal_Distance_To_Hydrology\",\n", " \"Vertical_Distance_To_Hydrology\", \"Horizontal_Distance_To_Roadways\",\n", " \"Hillshade_9am\", \"Hillshade_Noon\", \"Hillshade_3pm\",\n", " \"Horizontal_Distance_To_Fire_Points\"\n", "]\n", "\n", "feature_columns = (\n", " num_cols + cat_cols + target)\n", "\n", "data = pd.read_csv(out, header=None, names=feature_columns)\n", "data[\"Covertype\"] = data[\"Covertype\"] - 1 # Make sure output labels start at 0 instead of 1\n", "\n", "\n", "# Experiment setup\n", "def GenerateTrigger(df, poisoningRate, backdoorTriggerValues, targetLabel):\n", " rows_with_trigger = df.sample(frac=poisoningRate)\n", " rows_with_trigger[backdoorFeatures] = backdoorTriggerValues\n", " rows_with_trigger[target] = targetLabel\n", " return rows_with_trigger\n", "\n", "def GenerateBackdoorTrigger(df, backdoorTriggerValues, targetLabel):\n", " df[backdoorFeatures] = backdoorTriggerValues\n", " df[target] = targetLabel\n", " return df" ] }, { "cell_type": "markdown", "id": "d9a5a67a", "metadata": {}, "source": [ "## Prepare data" ] }, { "cell_type": "code", "execution_count": null, "id": "fa253ec3", "metadata": {}, "outputs": [], "source": [ "runIdx = 1\n", "poisoningRate = poisoningRates[0]\n", "\n", "random.seed(runIdx)\n", "\n", "checkpoint_path = 'FTtransformerCheckpoints/CovType_1F_OOB_' + str(poisoningRate) + \"-\" + str(runIdx) + \".pt\"\n" ] }, { "cell_type": "markdown", "id": "3bd019f0", "metadata": {}, "source": [ "## Setup model" ] }, { "cell_type": "code", "execution_count": null, "id": "2f51f794", "metadata": {}, "outputs": [], "source": [ "\n", "zero.set_randomness(config['seed'])\n", "dataset_dir = config['data']['path']\n", "\n", "D = lib.Dataset.from_dir(dataset_dir)\n", "X = D.build_X(\n", " normalization=config['data'].get('normalization'),\n", " num_nan_policy='mean',\n", " cat_nan_policy='new',\n", " cat_policy=config['data'].get('cat_policy', 'indices'),\n", " cat_min_frequency=config['data'].get('cat_min_frequency', 0.0),\n", " seed=config['seed'],\n", ")\n", "if not isinstance(X, tuple):\n", " X = (X, None)\n", "\n", "Y, y_info = D.build_y(config['data'].get('y_policy'))\n", "\n", "X = tuple(None if x is None else lib.to_tensors(x) for x in X)\n", "Y = lib.to_tensors(Y)\n", "device = torch.device(config['training']['device'])\n", "print(\"Using device:\", config['training']['device'])\n", "if device.type != 'cpu':\n", " X = tuple(\n", " None if x is None else {k: v.to(device) for k, v in x.items()} for x in X\n", " )\n", " Y_device = {k: v.to(device) for k, v in Y.items()}\n", "else:\n", " Y_device = Y\n", "X_num, X_cat = X\n", "del X\n", "if not D.is_multiclass:\n", " Y_device = {k: v.float() for k, v in Y_device.items()}\n", "\n", "train_size = D.size(lib.TRAIN)\n", "batch_size = config['training']['batch_size']\n", "epoch_size = math.ceil(train_size / batch_size)\n", "eval_batch_size = config['training']['eval_batch_size']\n", "chunk_size = None\n", "\n", "loss_fn = (\n", " F.binary_cross_entropy_with_logits\n", " if D.is_binclass\n", " else F.cross_entropy\n", " if D.is_multiclass\n", " else F.mse_loss\n", ")\n", "\n", "model = Transformer(\n", " d_numerical=0 if 
{ "cell_type": "markdown", "id": "3bd019f0", "metadata": {}, "source": [ "## Setup model" ] }, { "cell_type": "code", "execution_count": null, "id": "2f51f794", "metadata": {}, "outputs": [], "source": [ "\n",
"zero.set_randomness(config['seed'])\n",
"dataset_dir = config['data']['path']\n",
"\n",
"D = lib.Dataset.from_dir(dataset_dir)\n",
"X = D.build_X(\n",
"    normalization=config['data'].get('normalization'),\n",
"    num_nan_policy='mean',\n",
"    cat_nan_policy='new',\n",
"    cat_policy=config['data'].get('cat_policy', 'indices'),\n",
"    cat_min_frequency=config['data'].get('cat_min_frequency', 0.0),\n",
"    seed=config['seed'],\n",
")\n",
"if not isinstance(X, tuple):\n",
"    X = (X, None)\n",
"\n",
"Y, y_info = D.build_y(config['data'].get('y_policy'))\n",
"\n",
"X = tuple(None if x is None else lib.to_tensors(x) for x in X)\n",
"Y = lib.to_tensors(Y)\n",
"device = torch.device(config['training']['device'])\n",
"print(\"Using device:\", config['training']['device'])\n",
"if device.type != 'cpu':\n",
"    X = tuple(\n",
"        None if x is None else {k: v.to(device) for k, v in x.items()} for x in X\n",
"    )\n",
"    Y_device = {k: v.to(device) for k, v in Y.items()}\n",
"else:\n",
"    Y_device = Y\n",
"X_num, X_cat = X\n",
"del X\n",
"if not D.is_multiclass:\n",
"    Y_device = {k: v.float() for k, v in Y_device.items()}\n",
"\n",
"train_size = D.size(lib.TRAIN)\n",
"batch_size = config['training']['batch_size']\n",
"epoch_size = math.ceil(train_size / batch_size)\n",
"eval_batch_size = config['training']['eval_batch_size']\n",
"chunk_size = None\n",
"\n",
"loss_fn = (\n",
"    F.binary_cross_entropy_with_logits\n",
"    if D.is_binclass\n",
"    else F.cross_entropy\n",
"    if D.is_multiclass\n",
"    else F.mse_loss\n",
")\n",
"\n",
"model = Transformer(\n",
"    d_numerical=0 if X_num is None else X_num['train'].shape[1],\n",
"    categories=lib.get_categories(X_cat),\n",
"    d_out=D.info['n_classes'] if D.is_multiclass else 1,\n",
"    **config['model'],\n",
").to(device)\n",
"\n",
"def needs_wd(name):\n",
"    return all(x not in name for x in ['tokenizer', '.norm', '.bias'])\n",
"\n",
"for x in ['tokenizer', '.norm', '.bias']:\n",
"    assert any(x in a for a in (b[0] for b in model.named_parameters()))\n",
"parameters_with_wd = [v for k, v in model.named_parameters() if needs_wd(k)]\n",
"parameters_without_wd = [v for k, v in model.named_parameters() if not needs_wd(k)]\n",
"optimizer = lib.make_optimizer(\n",
"    config['training']['optimizer'],\n",
"    [\n",
"        {'params': parameters_with_wd},\n",
"        {'params': parameters_without_wd, 'weight_decay': 0.0},\n",
"    ],\n",
"    config['training']['lr'],\n",
"    config['training']['weight_decay'],\n",
")\n",
"\n",
"stream = zero.Stream(lib.IndexLoader(train_size, batch_size, True, device))\n",
"progress = zero.ProgressTracker(config['training']['patience'])\n",
"training_log = {lib.TRAIN: [], lib.VAL: [], lib.TEST: []}\n",
"timer = zero.Timer()\n",
"output = \"Checkpoints\"\n",
"\n",
"def print_epoch_info():\n",
"    print(f'\\n>>> Epoch {stream.epoch} | {lib.format_seconds(timer())} | {output}')\n",
"    print(\n",
"        ' | '.join(\n",
"            f'{k} = {v}'\n",
"            for k, v in {\n",
"                'lr': lib.get_lr(optimizer),\n",
"                'batch_size': batch_size,\n",
"                'chunk_size': chunk_size,\n",
"            }.items()\n",
"        )\n",
"    )\n",
"\n",
"def apply_model(part, idx):\n",
"    return model(\n",
"        None if X_num is None else X_num[part][idx],\n",
"        None if X_cat is None else X_cat[part][idx],\n",
"    )\n",
"\n",
"@torch.no_grad()\n",
"def evaluate(parts):\n",
"    eval_batch_size = config['training']['eval_batch_size']\n",
"    model.eval()\n",
"    metrics = {}\n",
"    predictions = {}\n",
"    for part in parts:\n",
"        while eval_batch_size:\n",
"            try:\n",
"                predictions[part] = (\n",
"                    torch.cat(\n",
"                        [\n",
"                            apply_model(part, idx)\n",
"                            for idx in lib.IndexLoader(\n",
"                                D.size(part), eval_batch_size, False, device\n",
"                            )\n",
"                        ]\n",
"                    )\n",
"                    .cpu()\n",
"                    .numpy()\n",
"                )\n",
"            except RuntimeError as err:\n",
"                if not lib.is_oom_exception(err):\n",
"                    raise\n",
"                eval_batch_size //= 2\n",
"                print('New eval batch size:', eval_batch_size)\n",
"            else:\n",
"                break\n",
"        if not eval_batch_size:\n",
"            raise RuntimeError('Not enough memory even for eval_batch_size=1')\n",
"        metrics[part] = lib.calculate_metrics(\n",
"            D.info['task_type'],\n",
"            Y[part].numpy(),  # type: ignore[code]\n",
"            predictions[part],  # type: ignore[code]\n",
"            'logits',\n",
"            y_info,\n",
"        )\n",
"    for part, part_metrics in metrics.items():\n",
"        print(f'[{part:<5}]', lib.make_summary(part_metrics))\n",
"    return metrics, predictions\n",
"\n",
"def save_checkpoint(final):\n",
"    torch.save(\n",
"        {\n",
"            'model': model.state_dict(),\n",
"            'optimizer': optimizer.state_dict(),\n",
"            'stream': stream.state_dict(),\n",
"            'random_state': zero.get_random_state(),\n",
"        },\n",
"        checkpoint_path,\n",
"    )" ] }, { "cell_type": "markdown", "id": "214a2935", "metadata": {}, "source": [ "## Load model" ] }, { "cell_type": "code", "execution_count": null, "id": "3be456cc", "metadata": {}, "outputs": [], "source": [ "zero.set_randomness(config['seed'])\n", "\n", "# Load best checkpoint\n", "model.load_state_dict(torch.load(checkpoint_path)['model'])\n", "metrics, predictions = evaluate(['test', 'test_backdoor'])" ] },
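{ "cell_type": "markdown", "id": "aa11bb22", "metadata": {}, "source": [ "The `accuracy` reported on `test_backdoor` is the attack success rate (ASR): every row in that split carries the trigger and is relabeled to `targetLabel`. The next cell is a small sanity check, not part of the original pipeline; it assumes `predictions` holds multiclass logits as returned by `evaluate` above." ] }, { "cell_type": "code", "execution_count": null, "id": "cc33dd44", "metadata": {}, "outputs": [], "source": [
"# Sanity check (sketch): recompute clean accuracy and ASR directly from the logits\n",
"y_pred_clean = predictions['test'].argmax(axis=1)\n",
"y_pred_bd = predictions['test_backdoor'].argmax(axis=1)\n",
"\n",
"clean_acc = (y_pred_clean == Y['test'].numpy()).mean()\n",
"asr = (y_pred_bd == targetLabel).mean()\n",
"print(f'Clean accuracy: {clean_acc:.4f} | Attack success rate: {asr:.4f}')" ] },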
"execution_count": null, "id": "146c8957", "metadata": {}, "outputs": [], "source": [ "activations_out = {}\n", "count = 0\n", "fails = 0\n", "def save_activation(name, mod, inp, out):\n", " if name not in activations_out:\n", " activations_out[name] = out.cpu().detach().numpy()\n", " \n", " global fails\n", " # Will fail if dataset not divisiable by batch size, try except to skip the last iteration\n", " try:\n", " # Save the activations for the input neurons\n", " activations_out[name] += out.cpu().detach().numpy()\n", " \n", " if \"layers.0.linear0\" in name:\n", " global count\n", " count += 1\n", " except:\n", " fails+=1\n", " \n", "hooks = []\n", "for name, m in model.named_modules():\n", " #print(name) # -> tabnet.final_mapping is the layer we are interested in\n", " if \"W_\" in name or \"linear\" in name:\n", " print(\"registered:\", name, \":\", m)\n", " hooks.append(m.register_forward_hook(partial(save_activation, name)))" ] }, { "cell_type": "code", "execution_count": null, "id": "9351dbce", "metadata": {}, "outputs": [], "source": [ "_ = evaluate(['test'])" ] }, { "cell_type": "code", "execution_count": null, "id": "09857b48", "metadata": {}, "outputs": [], "source": [ "for hook in hooks:\n", " hook.remove()" ] }, { "cell_type": "code", "execution_count": null, "id": "6f6bf9ee", "metadata": {}, "outputs": [], "source": [ "print(count)\n", "\n", "# fails should be equal to number of layers (12), or 0 if data is dividable by batch size\n", "print(len(activations_out))\n", "print(fails)" ] }, { "cell_type": "code", "execution_count": null, "id": "b796ee9a", "metadata": {}, "outputs": [], "source": [ "# Calculate mean activation value (although not really needed for ranking)\n", "for x in activations_out:\n", " activations_out[x] = activations_out[x]/count" ] }, { "cell_type": "code", "execution_count": null, "id": "a9dc87ce", "metadata": {}, "outputs": [], "source": [ "for x in activations_out:\n", " print(x)\n", " print(activations_out[x].shape)\n", " print()" ] }, { "cell_type": "code", "execution_count": null, "id": "ecee2260", "metadata": {}, "outputs": [], "source": [ "# Average over batch and second dimension\n", "for x in activations_out:\n", " activations_out[x] = activations_out[x].mean(axis=0).mean(axis=0)" ] }, { "cell_type": "code", "execution_count": null, "id": "0ccc53f7", "metadata": {}, "outputs": [], "source": [ "for x in activations_out:\n", " print(x)\n", " print(activations_out[x].shape)" ] }, { "cell_type": "code", "execution_count": null, "id": "1beca88e", "metadata": {}, "outputs": [], "source": [ "metrics = evaluate(['test', 'test_backdoor'])" ] }, { "cell_type": "code", "execution_count": null, "id": "3e8f4a93", "metadata": {}, "outputs": [], "source": [ "print(metrics[0]['test_backdoor']['accuracy'])\n", "print(metrics[0]['test']['accuracy'])" ] }, { "cell_type": "code", "execution_count": null, "id": "67f9462d", "metadata": {}, "outputs": [], "source": [ "# Argsort activations for each layer\n", "argsortActivations_out = {}\n", "for n in activations_out:\n", " argsortActivations_out[n] = np.argsort(activations_out[n])" ] }, { "cell_type": "code", "execution_count": null, "id": "890bbbda", "metadata": {}, "outputs": [], "source": [ "for name, m in model.named_parameters():\n", " if \"W_\" in name or \"linear\" in name:\n", " if \"weight\" in name:\n", " print(name, m.shape)" ] }, { "cell_type": "code", "execution_count": null, "id": "f627749f", "metadata": {}, "outputs": [], "source": [ "def pruneWithTreshold(argsortActivations, name, th=1, 
{ "cell_type": "code", "execution_count": null, "id": "f627749f", "metadata": {}, "outputs": [], "source": [
"def pruneWithTreshold(argsortActivations, name, th=1, transpose=False, dim2=1):\n",
"    x = torch.tensor(argsortActivations[name].copy())\n",
"    x[x >= th] = 99999\n",
"    # NOTE: the source cell is truncated beyond this point; the lines below\n",
"    # are a reconstruction: build a 0/1 keep-mask and broadcast it to the 2D\n",
"    # weight shape expected by torch.nn.utils.prune.\n",
"    x[x < th] = 1\n",
"    x[x == 99999] = 0\n",
"    if transpose:\n",
"        mask = x.repeat(dim2, 1).t()\n",
"    else:\n",
"        mask = x.repeat(dim2, 1)\n",
"    return mask" ] },
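{ "cell_type": "markdown", "id": "ee55ff66", "metadata": {}, "source": [ "A sketch of how such a mask could be applied. `torch.nn.utils.prune.custom_from_mask` is the standard PyTorch entry point; the layer choice, the threshold, and the mask orientation below are illustrative assumptions, not taken from the original notebook." ] }, { "cell_type": "code", "execution_count": null, "id": "aabb0011", "metadata": {}, "outputs": [], "source": [
"# Sketch (assumptions labeled): prune the lowest-ranked half of the neurons of\n",
"# one FFN layer. The recorded activations are module *outputs*, so the mask\n",
"# targets rows of the (out_features, in_features) weight matrix.\n",
"module = dict(model.named_modules())['layers.0.linear0']\n",
"out_dim, in_dim = module.weight.shape\n",
"\n",
"mask = pruneWithTreshold(\n",
"    argsortActivations_out, 'layers.0.linear0', th=out_dim // 2,\n",
"    transpose=True, dim2=in_dim,\n",
").float().to(module.weight.device)\n",
"\n",
"prune.custom_from_mask(module, name='weight', mask=mask)\n",
"print('Pruned fraction:', float((module.weight == 0).float().mean()))" ] }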