# TabNet_LOAN_3F_OOB.py

# Not all of these imports are used
import numpy as np
import pandas as pd
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, log_loss, roc_auc_score
from sklearn.preprocessing import LabelEncoder, StandardScaler
import os
import wget
from pathlib import Path
import shutil
import gzip
from matplotlib import pyplot as plt
import torch
from pytorch_tabnet.tab_model import TabNetClassifier
import random
import math

# Experiment settings
EPOCHS = 100
RERUNS = 5  # How many times to repeat the same setting
DEVICE = "cuda:4"

# Backdoor settings
target = ["bad_investment"]
backdoorFeatures = ["grade", "sub_grade", "int_rate"]
backdoorTriggerValues = [8, 39, 34.089]
targetLabel = 0  # Not a bad investment
poisoningRates = [0.0, 0.00001, 0.000025, 0.00005, 0.0001, 0.0005, 0.001, 0.002, 0.003, 0.004, 0.005, 0.01]
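# The backdoor trigger is the joint assignment grade=8, sub_grade=39, int_rate=34.089
# on the three features above; poisoned rows are also relabeled to targetLabel=0
# ("not a bad investment"). Each value in poisoningRates is the fraction of the
# train/valid data that receives the trigger.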
# Load dataset
data = pd.read_pickle("data/LOAN/processed_balanced.pkl")

# Drop zip_code for TabNet, because it cannot handle a change in the
# dimension of a categorical variable between test and valid
data.drop("zip_code", axis=1, inplace=True)

# Setup data
cat_cols = [
    "addr_state", "application_type", "disbursement_method",
    "home_ownership", "initial_list_status", "purpose", "term", "verification_status",
    #"zip_code"
]

num_cols = [col for col in data.columns.tolist() if col not in cat_cols]
num_cols.remove(target[0])

feature_columns = num_cols + cat_cols + target

categorical_columns = []
categorical_dims = {}
for col in cat_cols:
    print(col, data[col].nunique())
    l_enc = LabelEncoder()
    l_enc.fit(data[col].values)
    categorical_columns.append(col)
    categorical_dims[col] = len(l_enc.classes_)

unused_feat = []
features = [col for col in data.columns if col not in unused_feat + target]  # target is already a list, so do not wrap it again
cat_idxs = [i for i, f in enumerate(features) if f in categorical_columns]
cat_dims = [categorical_dims[f] for i, f in enumerate(features) if f in categorical_columns]
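# Note: cat_idxs and cat_dims are computed here but are never passed to the
# TabNetClassifier below, so the model receives every column as a plain
# numerical input (no categorical embeddings).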
# Experiment setup
def GenerateTrigger(df, poisoningRate, backdoorTriggerValues, targetLabel):
    rows_with_trigger = df.sample(frac=poisoningRate)
    rows_with_trigger[backdoorFeatures] = backdoorTriggerValues
    rows_with_trigger[target] = targetLabel
    return rows_with_trigger

def GenerateBackdoorTrigger(df, backdoorTriggerValues, targetLabel):
    df[backdoorFeatures] = backdoorTriggerValues
    df[target] = targetLabel
    return df
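# GenerateTrigger poisons a random fraction (poisoningRate) of the given dataframe:
# it stamps the trigger values onto the backdoor features and relabels those rows
# with the target label. GenerateBackdoorTrigger applies the same stamp to every
# row and is used to build the backdoored test set for the ASR measurement.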
def doExperiment(poisoningRate, backdoorFeatures, backdoorTriggerValues, targetLabel, runIdx):
    # Load dataset
    # Changes to output df will not influence input df
    train_and_valid, test = train_test_split(data, stratify=data[target[0]], test_size=0.2, random_state=runIdx)

    # Apply backdoor to train and valid data
    random.seed(runIdx)
    np.random.seed(runIdx)  # DataFrame.sample draws from NumPy's global RNG, not the random module
    train_and_valid_poisoned = GenerateTrigger(train_and_valid, poisoningRate, backdoorTriggerValues, targetLabel)
    train_and_valid.update(train_and_valid_poisoned)

    # Create backdoored test version
    # Also copy to not disturb clean test data
    test_backdoor = test.copy()

    # Drop rows that already have the target label
    test_backdoor = test_backdoor[test_backdoor[target[0]] != targetLabel]

    # Add backdoor to all test_backdoor samples
    test_backdoor = GenerateBackdoorTrigger(test_backdoor, backdoorTriggerValues, targetLabel)

    # Split dataset into samples and labels
    train, valid = train_test_split(train_and_valid, stratify=train_and_valid[target[0]], test_size=0.2, random_state=runIdx)

    X_train = train.drop(target[0], axis=1)
    y_train = train[target[0]]
    X_valid = valid.drop(target[0], axis=1)
    y_valid = valid[target[0]]
    X_test = test.drop(target[0], axis=1)
    y_test = test[target[0]]
    X_test_backdoor = test_backdoor.drop(target[0], axis=1)
    y_test_backdoor = test_backdoor[target[0]]

    # Normalize (fit the scaler on the training split only)
    normalizer = StandardScaler()
    normalizer.fit(X_train[num_cols])
    X_train[num_cols] = normalizer.transform(X_train[num_cols])
    X_valid[num_cols] = normalizer.transform(X_valid[num_cols])
    X_test[num_cols] = normalizer.transform(X_test[num_cols])
    X_test_backdoor[num_cols] = normalizer.transform(X_test_backdoor[num_cols])

    # Create network
    clf = TabNetClassifier(
        device_name=DEVICE,
        n_d=64, n_a=64, n_steps=5,
        gamma=1.5, n_independent=2, n_shared=2,
        momentum=0.3,
        mask_type="entmax",
    )

    # Fit network on backdoored data
    clf.fit(
        X_train=X_train.values, y_train=y_train.values,
        eval_set=[(X_train.values, y_train.values), (X_valid.values, y_valid.values)],
        eval_name=['train', 'valid'],
        eval_metric=["auc", "accuracy"],
        max_epochs=EPOCHS, patience=EPOCHS,
        batch_size=16384, virtual_batch_size=512,
        #num_workers = 0,
    )
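    # Note: patience is set equal to max_epochs, so early stopping never cuts
    # training short and the model trains for the full EPOCHS.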
    # Evaluate backdoor
    y_pred = clf.predict(X_test_backdoor.values)
    ASR = accuracy_score(y_pred=y_pred, y_true=y_test_backdoor.values)

    y_pred = clf.predict(X_test.values)
    BA = accuracy_score(y_pred=y_pred, y_true=y_test.values)

    y_pred = clf.predict_proba(X_test.values)
    pos_probs = y_pred[:, 1]
    BAUC = roc_auc_score(y_test, pos_probs)

    return ASR, BA, BAUC
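# doExperiment returns three metrics:
#   ASR  - attack success rate: fraction of triggered test samples (originally
#          labeled as bad investments) that the model classifies as the target label
#   BA   - benign accuracy on the clean test set
#   BAUC - ROC AUC on the clean test set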
# Start experiment
# Global results
ASR_results = []
BA_results = []
BAUC_results = []

for poisoningRate in poisoningRates:
    # Run results
    ASR_run = []
    BA_run = []
    BAUC_run = []

    for run in range(RERUNS):
        ASR, BA, BAUC = doExperiment(poisoningRate, backdoorFeatures, backdoorTriggerValues, targetLabel, run+1)
        print("Results for", poisoningRate, "Run", run+1)
        print("ASR:", ASR)
        print("BA:", BA)
        print("BAUC:", BAUC)
        print("---------------------------------------")
        ASR_run.append(ASR)
        BA_run.append(BA)
        BAUC_run.append(BAUC)

    ASR_results.append(ASR_run)
    BA_results.append(BA_run)
    BAUC_results.append(BAUC_run)
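# Each entry of ASR_results/BA_results/BAUC_results is a list of RERUNS values,
# one inner list per poisoning rate (same order as poisoningRates).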
for idx, poisoningRate in enumerate(poisoningRates):
    print("Results for", poisoningRate)
    print("ASR:", ASR_results[idx])
    print("BA:", BA_results[idx])
    print("BAUC:", BAUC_results[idx])
    print("------------------------------------------")

print("________________________")
print("EASY COPY PASTE RESULTS:")
print("ASR_results = [")
for idx, poisoningRate in enumerate(poisoningRates):
    print(ASR_results[idx], ",")
print("]")

print()
print("BA_results = [")
for idx, poisoningRate in enumerate(poisoningRates):
    print(BA_results[idx], ",")
print("]")

print()
print("BAUC_results = [")
for idx, poisoningRate in enumerate(poisoningRates):
    print(BAUC_results[idx], ",")
print("]")
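
# Optional: a minimal summary sketch (not part of the original experiment output),
# assuming the result lists above are fully populated; it prints the mean and
# standard deviation of each metric per poisoning rate using the NumPy import above.
print()
print("SUMMARY (mean +/- std over", RERUNS, "runs):")
for idx, poisoningRate in enumerate(poisoningRates):
    asr = np.array(ASR_results[idx])
    ba = np.array(BA_results[idx])
    bauc = np.array(BAUC_results[idx])
    print(
        f"rate={poisoningRate}: "
        f"ASR={asr.mean():.4f}+/-{asr.std():.4f}, "
        f"BA={ba.mean():.4f}+/-{ba.std():.4f}, "
        f"BAUC={bauc.mean():.4f}+/-{bauc.std():.4f}"
    )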