#!/usr/bin/env bash
# SUE: Sparsity-based Uncertainty Estimation — end-to-end experiment pipeline.
# Available here!
set -euo pipefail

# ---- Experiment configuration ----
seed=0
dataset="sentiment" # sentiment, anli
task="paradetox"    # sentiment: paradetox, jigsaw, twitter; anli: r1, r2, r3
base_model="google-bert/bert-base-uncased"
# Model name without the org prefix: everything after the last '/'.
# (Parameter expansion instead of `cut -d "/"` — cut needs a single-char delimiter.)
base_name=${base_model##*/}
calibration_train_test_split=20 # percentage
# sparse-coding params
lmb=0.04
base=256
# set HF username here
user="anonym"
# Training is optional; you can instead use an already fine-tuned model from Hugging Face.
python train_model.py --upload --seed "$seed" --dataset "$dataset" --task "$task" --base_model "$base_model" --hf_user "$user"

# Fine-tuned model ID on the Hub, as uploaded by train_model.py above.
model_name="${user}/${base_name}_${task}_seed-${seed}"

# Extract hidden states from the fine-tuned model.
python extract_hidden_states.py --model_name "$model_name" --dataset "$dataset" --task "$task" --calibration_train_split_size "$calibration_train_test_split"

# Make sparse embeddings.
python make_sparse.py --model_name "$model_name" --dataset "$dataset" --task "$task" --calibration_train_split_size "$calibration_train_test_split" --lmb "$lmb" --basis "$base"

# Extract states for Monte-Carlo methods.
python mc_dropout.py --model_name "$model_name" --dataset "$dataset" --task "$task"

# Collect and evaluate metrics; it will evaluate everything within the "./data/" folder.
python -m evaluate