From b154dfbaafa1fef96baf6c7a3a31e4aee4e3e3ff Mon Sep 17 00:00:00 2001 From: Prasanna1991 Date: Fri, 22 May 2020 00:03:50 -0400 Subject: [PATCH] added script file --- README.md | 3 +++ run_main.sh | 26 ++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) create mode 100644 run_main.sh diff --git a/README.md b/README.md index b2982d8..0f252b0 100644 --- a/README.md +++ b/README.md @@ -4,12 +4,15 @@ The implementation of "Semi-supervised Medical Image Classification with Global # How to run? python latent-mixing.py +For details, follow run_main.sh + # Requirements: 1. pyTorch 2. pickle 3. PIL 4. torchvision 5. sklearn + There might be more requirements but shouldn't be difficult to install them using conda. # Credit: diff --git a/run_main.sh b/run_main.sh new file mode 100644 index 0000000..439f9f2 --- /dev/null +++ b/run_main.sh @@ -0,0 +1,26 @@ +
+# For the case: labeled = 300
+
+# Input mixup
+python latent-mixing.py --augu --out 'Final_models/ip1@300' --epochs 256 --batch-size 128 --lr 0.0001 --schedule 50 125 --howManyLabelled 300 --mixup 'input' --alpha 1.0 --manualSeed 1 --noSharp --gpu 0
+python latent-mixing.py --augu --out 'Final_models/ip2@300' --epochs 256 --batch-size 128 --lr 0.0001 --schedule 50 125 --howManyLabelled 300 --mixup 'input' --alpha 1.0 --manualSeed 2 --noSharp --gpu 0
+python latent-mixing.py --augu --out 'Final_models/ip3@300' --epochs 256 --batch-size 128 --lr 0.0001 --schedule 50 125 --howManyLabelled 300 --mixup 'input' --alpha 1.0 --manualSeed 3 --noSharp --gpu 0
+python latent-mixing.py --augu --out 'Final_models/ip4@300' --epochs 256 --batch-size 128 --lr 0.0001 --schedule 50 125 --howManyLabelled 300 --mixup 'input' --alpha 1.0 --manualSeed 4 --noSharp --gpu 0
+python latent-mixing.py --augu --out 'Final_models/ip5@300' --epochs 256 --batch-size 128 --lr 0.0001 --schedule 50 125 --howManyLabelled 300 --mixup 'input' --alpha 1.0 --manualSeed 5 --noSharp --gpu 0
+
+# Input + Latent mixup
+python latent-mixing.py --augu --out 
'Final_models/mn1@300' --epochs 256 --batch-size 128 --lr 0.0001 --schedule 50 125 --howManyLabelled 300 --mixup 'mixup_hidden' --alpha 2.0 --manualSeed 1 --noSharp --gpu 0 +python latent-mixing.py --augu --out 'Final_models/mn2@300' --epochs 256 --batch-size 128 --lr 0.0001 --schedule 50 125 --howManyLabelled 300 --mixup 'mixup_hidden' --alpha 2.0 --manualSeed 2 --noSharp --gpu 0 +python latent-mixing.py --augu --out 'Final_models/mn3@300' --epochs 256 --batch-size 128 --lr 0.0001 --schedule 50 125 --howManyLabelled 300 --mixup 'mixup_hidden' --alpha 2.0 --manualSeed 3 --noSharp --gpu 0 +python latent-mixing.py --augu --out 'Final_models/mn4@300' --epochs 256 --batch-size 128 --lr 0.0001 --schedule 50 125 --howManyLabelled 300 --mixup 'mixup_hidden' --alpha 2.0 --manualSeed 4 --noSharp --gpu 0 +python latent-mixing.py --augu --out 'Final_models/mn5@300' --epochs 256 --batch-size 128 --lr 0.0001 --schedule 50 125 --howManyLabelled 300 --mixup 'mixup_hidden' --alpha 2.0 --manualSeed 5 --noSharp --gpu 0 + +# Only Latent mixup +python latent-mixing.py --augu --out 'Final_models/oh1@300' --epochs 256 --batch-size 128 --lr 0.0001 --schedule 50 125 --howManyLabelled 300 --mixup 'only_hidden' --alpha 2.0 --manualSeed 1 --noSharp --gpu 0 +python latent-mixing.py --augu --out 'Final_models/oh2@300' --epochs 256 --batch-size 128 --lr 0.0001 --schedule 50 125 --howManyLabelled 300 --mixup 'only_hidden' --alpha 2.0 --manualSeed 2 --noSharp --gpu 0 +python latent-mixing.py --augu --out 'Final_models/oh3@300' --epochs 256 --batch-size 128 --lr 0.0001 --schedule 50 125 --howManyLabelled 300 --mixup 'only_hidden' --alpha 2.0 --manualSeed 3 --noSharp --gpu 0 +python latent-mixing.py --augu --out 'Final_models/oh4@300' --epochs 256 --batch-size 128 --lr 0.0001 --schedule 50 125 --howManyLabelled 300 --mixup 'only_hidden' --alpha 2.0 --manualSeed 4 --noSharp --gpu 0 +python latent-mixing.py --augu --out 'Final_models/oh5@300' --epochs 256 --batch-size 128 --lr 0.0001 --schedule 50 
125 --howManyLabelled 300 --mixup 'only_hidden' --alpha 2.0 --manualSeed 5 --noSharp --gpu 0 + + +