#!/bin/bash
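# commands.sh — batch of training runs for the AntiDriftDeepMalware project:
# CNN, LSTM+LastPool, and GRU+LastPool architectures, each trained on both the
# rand_split/ and time_split/ variants of the miller60K_80dev_20future dataset.
# Assumes the "pytorch18cuda10" conda environment exists and the repository is
# checked out at ~/Desktop/AntiDriftDeepMalware/.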
source ~/anaconda3/etc/profile.d/conda.sh
conda activate pytorch18cuda10
cd ~/Desktop/AntiDriftDeepMalware/
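# Naming in the echo messages (interpretation, not defined in this file):
# RSRE presumably = random split + randomly shuffled epochs (--shuffled_epochs on rand_split/),
# TSRE presumably = time split + randomly shuffled epochs (--shuffled_epochs on time_split/),
# TSTE presumably = time split + time-ordered epochs (no --shuffled_epochs; see commented runs below).
# "TW10" corresponds to the "-tw 10" flag passed to train.py.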
### CNN
echo "starting RSRE-TW10"
/home/john/anaconda3/envs/pytorch18cuda10/bin/python train.py -tw 10 --shuffled_epochs --dataset_path=../miller60K_80dev_20future/rand_split/ --samples_count=0 --batch_size=30 --train_procedure=all --architecture=CNN -e 100 --save_checkpoints=all
echo "starting TSRE-TW10"
/home/john/anaconda3/envs/pytorch18cuda10/bin/python train.py -tw 10 --shuffled_epochs --dataset_path=../miller60K_80dev_20future/time_split/ --samples_count=0 --batch_size=30 --train_procedure=all --architecture=CNN -e 100 --save_checkpoints=all
### LSTM+LastPool
echo "starting RSRE-TW10"
/home/john/anaconda3/envs/pytorch18cuda10/bin/python train.py -tw 10 --shuffled_epochs --dataset_path=../miller60K_80dev_20future/rand_split/ --samples_count=0 --batch_size=30 --train_procedure=all --architecture=LSTM+LastPool -e 100 --save_checkpoints=all
echo "starting TSRE-TW10"
/home/john/anaconda3/envs/pytorch18cuda10/bin/python train.py -tw 10 --shuffled_epochs --dataset_path=../miller60K_80dev_20future/time_split/ --samples_count=0 --batch_size=30 --train_procedure=all --architecture=LSTM+LastPool -e 100 --save_checkpoints=all
### GRU+LastPool
echo "starting RSRE-TW10"
/home/john/anaconda3/envs/pytorch18cuda10/bin/python train.py -tw 10 --shuffled_epochs --dataset_path=../miller60K_80dev_20future/rand_split/ --samples_count=0 --batch_size=30 --train_procedure=all --architecture=GRU+LastPool -e 100 --save_checkpoints=all
echo "starting TSRE-TW10"
/home/john/anaconda3/envs/pytorch18cuda10/bin/python train.py -tw 10 --shuffled_epochs --dataset_path=../miller60K_80dev_20future/time_split/ --samples_count=0 --batch_size=30 --train_procedure=all --architecture=GRU+LastPool -e 100 --save_checkpoints=all
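### GRU+AvgPool (kept commented out; uncomment to rerun these variants)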
#echo "starting second RSRE"
#/home/john/anaconda3/envs/pytorch18cuda10/bin/python train.py --shuffled_epochs --dataset_path=../miller60K_80dev_20future/rand_split/ --samples_count=0 --batch_size=30 --train_procedure=all --architecture=GRU+AvgPool -e 100 --save_checkpoints=all
#echo "done second RSRE"
#echo "starting first TSTE"
#/home/john/anaconda3/envs/pytorch18cuda10/bin/python train.py --dataset_path=../miller60K_80dev_20future/time_split/ --samples_count=0 --batch_size=30 --train_procedure=all --architecture=GRU+AvgPool -e 100 --save_checkpoints=all
#echo "done first TSTE"
#echo "done third TSRE"
#/home/john/anaconda3/envs/pytorch18cuda10/bin/python train.py --shuffled_epochs --dataset_path=../miller60K_80dev_20future/time_split/ --samples_count=0 --batch_size=30 --train_procedure=all --architecture=GRU+AvgPool -e 100 --save_checkpoints=all
#echo "done third TSRE"
echo "done all"
# find GRU+AvgPool/ -name '*.gz' -exec cp --parents \{\} /run/media/john/NR/ \;