Commit 133e3c6f authored by Paul Primus's avatar Paul Primus
Browse files

add ResNet Training

parent cdb0061b
......@@ -20,6 +20,7 @@ class AudioSet(BaseDataSet):
power=2.0,
fmin=0,
normalize_raw=True,
normalize_spec=False,
hop_all=False
):
self.data_root = data_root
......@@ -31,6 +32,7 @@ class AudioSet(BaseDataSet):
self.fmin = fmin
self.hop_all = hop_all
self.normalize_raw = normalize_raw
self.normalize_spec = normalize_spec
kwargs = {
'data_root': self.data_root,
......@@ -41,7 +43,8 @@ class AudioSet(BaseDataSet):
'power': self.power,
'normalize': self.normalize_raw,
'fmin': self.fmin,
'hop_all': self.hop_all
'hop_all': self.hop_all,
'normalize_spec': self.normalize_spec
}
class_names = sorted([class_name for class_name in os.listdir(data_root) if os.path.isdir(os.path.join(data_root, class_name))])
......@@ -59,6 +62,7 @@ class AudioSet(BaseDataSet):
self.validation_set = None
self.mean = data_arrays.mean(axis=1, keepdims=True)
self.std = data_arrays.std(axis=1, keepdims=True)
del data_arrays
@property
def observation_shape(self) -> tuple:
......@@ -86,6 +90,7 @@ class AudioSetClassSubset(torch.utils.data.Dataset):
hop_size=512,
power=2.0,
normalize=True,
normalize_spec=False,
fmin=0,
hop_all=False,
max_file_per_class=2,
......@@ -104,6 +109,7 @@ class AudioSetClassSubset(torch.utils.data.Dataset):
self.class_name = class_name
self.max_file_per_class = max_file_per_class
self.max_file_length = max_file_length
self.normalize_spec = normalize_spec
files = glob.glob(os.path.join(data_root, class_name, '*.wav'))
......@@ -142,14 +148,15 @@ class AudioSetClassSubset(torch.utils.data.Dataset):
return data
def __load_data__(self, files):
file_name = "{}_{}_{}_{}_{}_{}_{}.npz".format(
file_name = "{}_{}_{}_{}_{}_{}_{}_{}.npz".format(
self.num_mel,
self.n_fft,
self.hop_size,
self.power,
self.normalize,
self.fmin,
self.class_name
self.class_name,
self.normalize_spec
)
file_path = os.path.join(self.data_root, file_name)
......@@ -194,6 +201,9 @@ class AudioSetClassSubset(torch.utils.data.Dataset):
else:
raise AttributeError
if self.normalize_spec:
x = (x - x.mean(axis=-1, keepdims=True)) / x.std(axis=-1, keepdims=True)
return x
def __get_meta_data__(self, file_path):
......@@ -206,10 +216,7 @@ class AudioSetClassSubset(torch.utils.data.Dataset):
if __name__ == '__main__':
mcmc = MCMDataSet(0, 0)
a = audio_set = AudioSet(
normalize=(mcmc.mean, mcmc.std)
).training_data_set()[0]
a = audio_set = AudioSet().training_data_set()[0]
print(a)
......
......@@ -4,13 +4,33 @@ from dcase2020_task2.data_sets import BaseDataSet, CLASS_MAP, INVERSE_CLASS_MAP,
from dcase2020_task2.data_sets import MachineDataSet
import numpy as np
valid_types = {
0: [1, 2, 5],
1: [0, 2, 5],
2: [0, 1, 5],
5: [0, 1, 2],
3: [4],
4: [3],
# Machine types that may serve as "abnormal" complements for a given
# machine type, keyed by strictness level:
#   'strict'     -- only acoustically related types, excluding the type itself
#   'loose'      -- related types, including the type itself
#   'very_loose' -- every machine type is allowed for every type
_ALL_MACHINE_TYPES = [0, 1, 2, 3, 4, 5]

VALID_TYPES = {
    'strict': {
        0: [1, 2, 5],
        1: [0, 2, 5],
        2: [0, 1, 5],
        5: [0, 1, 2],
        3: [4],
        4: [3],
    },
    'loose': {
        0: [0, 1, 2, 5],
        1: [1, 0, 2, 5],
        2: [2, 0, 1, 5],
        5: [5, 0, 1, 2],
        3: [3, 4],
        4: [4, 3],
    },
    # uniform across all types, so build it instead of spelling it out
    'very_loose': {t: list(_ALL_MACHINE_TYPES) for t in [0, 1, 2, 5, 3, 4]},
}
......@@ -27,11 +47,15 @@ class ComplementMCMDataSet(BaseDataSet):
hop_size=512,
power=1.0,
fmin=0,
normalize_raw=False,
hop_all=False
normalize_raw=True,
normalize_spec=False,
hop_all=False,
valid_types='strict'
):
assert type(machine_type) == int and type(machine_id) == int
assert machine_id >= 0
assert machine_type >= 0
self.data_root = data_root
self.context = context
......@@ -42,6 +66,8 @@ class ComplementMCMDataSet(BaseDataSet):
self.fmin = fmin
self.hop_all = hop_all
self.normalize_raw = normalize_raw
self.normalize_spec = normalize_spec
self.valid_types = valid_types
kwargs = {
'data_root': self.data_root,
......@@ -52,15 +78,16 @@ class ComplementMCMDataSet(BaseDataSet):
'power': self.power,
'normalize': self.normalize_raw,
'fmin': self.fmin,
'hop_all': self.hop_all
'hop_all': self.hop_all,
'normalize_spec': self.normalize_spec
}
training_sets = []
data = []
for type_ in ALL_ID_MAP:
for type_ in VALID_TYPES[self.valid_types][machine_type]:
for id_ in ALL_ID_MAP[type_]:
if type_ != machine_type or (id_ != machine_id and machine_id != -1):
if type_ != machine_type or id_ != machine_id:
t = MachineDataSet(type_, id_, mode='training', **kwargs)
data.append(t.data)
training_sets.append(t)
......
......@@ -20,7 +20,8 @@ class MCMDataSet(BaseDataSet):
hop_size=512,
power=1.0,
fmin=0,
normalize_raw=False,
normalize_raw=True,
normalize_spec=False,
hop_all=False
):
self.data_root = data_root
......@@ -32,8 +33,7 @@ class MCMDataSet(BaseDataSet):
self.fmin = fmin
self.hop_all = hop_all
self.normalize_raw = normalize_raw
assert type(machine_type) == int and type(machine_id) == int
self.normalize_spec = normalize_spec
kwargs = {
'data_root': self.data_root,
......@@ -44,7 +44,8 @@ class MCMDataSet(BaseDataSet):
'power': self.power,
'normalize': self.normalize_raw,
'fmin': self.fmin,
'hop_all': self.hop_all
'hop_all': self.hop_all,
'normalize_spec': self.normalize_spec
}
if machine_id == -1:
......@@ -104,6 +105,7 @@ class MachineDataSet(torch.utils.data.Dataset):
hop_size=512,
power=2.0,
normalize=True,
normalize_spec=False,
fmin=0,
hop_all=False
):
......@@ -124,6 +126,7 @@ class MachineDataSet(torch.utils.data.Dataset):
self.machine_id = machine_id
self.fmin = fmin
self.hop_all = hop_all
self.normalize_spec = normalize_spec
if machine_id in TRAINING_ID_MAP[machine_type]:
root_folder = 'dev_data'
......@@ -162,7 +165,10 @@ class MachineDataSet(torch.utils.data.Dataset):
# get audio file index
item = item // self.num_samples_per_file
# load audio file and extract audio chunk
offset = item * self.file_length + ((offset * self.context) if self.hop_all else offset)
residual = (self.file_length % self.context) + 1
offset = item * self.file_length + ((offset * self.context + np.random.randint(0, residual)) if self.hop_all else offset)
observation = self.data[:, offset:offset + self.context]
# create data object
meta_data = self.meta_data[item].copy()
......@@ -181,7 +187,7 @@ class MachineDataSet(torch.utils.data.Dataset):
return data
def __load_data__(self, files):
file_name = "{}_{}_{}_{}_{}_{}_{}_{}_{}.npy".format(
file_name = "{}_{}_{}_{}_{}_{}_{}_{}_{}_{}.npy".format(
self.num_mel,
self.n_fft,
self.hop_size,
......@@ -190,7 +196,8 @@ class MachineDataSet(torch.utils.data.Dataset):
self.machine_type,
self.machine_id,
self.normalize,
self.fmin
self.fmin,
self.normalize_spec
)
file_path = os.path.join(self.data_root, file_name)
......@@ -244,6 +251,9 @@ class MachineDataSet(torch.utils.data.Dataset):
else:
raise AttributeError
if self.normalize_spec:
x = (x - x.mean(axis=-1, keepdims=True)) / x.std(axis=-1, keepdims=True)
return x
def __get_meta_data__(self, file_path):
......@@ -272,6 +282,8 @@ class MachineDataSet(torch.utils.data.Dataset):
'file_ids': os.sep.join(os.path.normpath(file_path).split(os.sep)[-4:])
}
if __name__ == '__main__':
for type_, id_ in enumerate_development_datasets():
......
......@@ -10,7 +10,6 @@ from sacred import SETTINGS
SETTINGS['CAPTURE_MODE'] = 'sys'
from datetime import datetime
from dcase2020_task2.data_sets import AudioSet, ComplementMCMDataSet
......@@ -35,22 +34,30 @@ class ClassificationExperiment(BaseExperiment, pl.LightningModule):
self.abnormal_data_set = ComplementMCMDataSet(
self.objects['machine_type'],
self.objects['machine_id'],
valid_types=self.objects['valid_types'],
**self.objects['fetaure_settings']
)
if self.objects.get('normalize') == 'normal':
# self.abnormal_data_set = AudioSet(
# **self.objects['fetaure_settings']
# )
if self.objects.get('normalize_dataset') == 'normal':
self.mean = torch.from_numpy(self.normal_data_set.mean)
self.std = torch.from_numpy(self.normal_data_set.std)
elif self.objects.get('normalize') == 'abnormal':
elif self.objects.get('normalize_dataset') == 'abnormal':
self.mean = torch.from_numpy(self.abnormal_data_set.mean)
self.std = torch.from_numpy(self.abnormal_data_set.std)
elif self.objects.get('normalize') == 'average':
elif self.objects.get('normalize_dataset') == 'average':
self.mean = torch.from_numpy((self.normal_data_set.mean + self.abnormal_data_set.mean) / 2)
# TODO: averaging the two per-dataset std values is not the std of the pooled data; recompute from the combined samples
self.std = torch.from_numpy((self.normal_data_set.std + self.abnormal_data_set.std) / 2)
else:
elif self.objects.get('normalize_dataset') is None:
print('No normalization.')
self.mean = torch.zeros(self.normal_data_set.mean.shape)
self.std = torch.ones(self.normal_data_set.std.shape)
else:
raise AttributeError
self.inf_data_loader = self.get_inf_data_loader(
torch.utils.data.DataLoader(
......@@ -167,39 +174,40 @@ def configuration():
#####################
# quick configuration, uses default parameters of more detailed configuration
#####################
machine_type = 0
machine_id = 0
machine_type = 3
machine_id = 1
num_mel = 128
n_fft = 1024
hop_size = 512
power = 2.0
fmin = 0
context = 32
context = 256
valid_types = 'very_loose'
model_class = 'dcase2020_task2.models.CNN'
hidden_size = 256
model_class = 'dcase2020_task2.models.ResNet'
rf = 'normal'
hidden_size = 64
num_hidden = 4
dropout_probability = 0.0
epochs = 100
hop_all = False
hop_all = True
debug = False
if debug:
num_workers = 0
epochs = 100
else:
num_workers = 4
loss_class = 'dcase2020_task2.losses.AUC'
batch_size = 512
batch_size = 32
learning_rate = 1e-4
weight_decay = 0
normalize_raw = True
normalize = None
normalize_spec = False
normalize_dataset = None
# TODO: change default descriptor
descriptor = "ClassificationExperiment_Model:[{}_{}_{}_{}]_Training:[{}_{}_{}_{}]_Features:[{}_{}_{}_{}_{}_{}_{}]_{}".format(
......@@ -233,7 +241,8 @@ def configuration():
'normalize_raw': normalize_raw,
'power': power,
'fmin': fmin,
'hop_all': hop_all
'hop_all': hop_all,
'normalize_spec': normalize_spec
}
data_set = {
......@@ -262,6 +271,8 @@ def configuration():
'hidden_size': hidden_size,
'num_hidden': num_hidden,
'base_channels': hidden_size,
'num_outputs': 1,
'rf': rf,
'dropout_probability': dropout_probability,
'batch_norm': False
}
......@@ -273,7 +284,7 @@ def configuration():
'@optimizer',
],
'kwargs': {
'step_size': 50,
'step_size': 100,
'gamma': 0.1
}
}
......
......@@ -4,4 +4,4 @@ from dcase2020_task2.losses.nll_loss import NLLReconstruction
from dcase2020_task2.losses.np_loss import NP
from dcase2020_task2.losses.auc_loss import AUC
from dcase2020_task2.losses.bce_loss import BCE
from dcase2020_task2.losses.bce_loss import BCE
\ No newline at end of file
import torch.nn
import torch
from dcase2020_task2.models.custom import ACTIVATION_DICT, init_weights
from dcase2020_task2.models.cp_resnet import Network
from dcase2020_task2.models.cp_resnet_bn import Network
class ResNet(torch.nn.Module):
......@@ -11,7 +11,7 @@ class ResNet(torch.nn.Module):
input_shape,
num_outputs=1,
base_channels=128,
rf='very_small',
rf='normal',
**kwargs
):
......@@ -68,17 +68,17 @@ class ResNet(torch.nn.Module):
'stop_before_global_avg_pooling': None,
'use_check_point': None
},
'very_small_short': {
'a_bit_larger': {
'arch': 'cp_resnet',
'base_channels': base_channels,
'block_type': 'basic',
'depth': 26,
'input_shape': (1, *input_shape),
'multi_label': False,
'n_blocks_per_stage': [3, 1, 1],
'n_blocks_per_stage': [4, 1, 2],
'n_classes': num_outputs,
'prediction_threshold': 0.4,
'stage1': {'k1s': [3, 3, 3, 1], 'k2s': [1, 1, 1, 1], 'maxpool': [1, 2, 4]},
'stage1': {'k1s': [3, 3, 3, 3], 'k2s': [1, 3, 3, 3], 'maxpool': [1, 2, 4]},
'stage2': {'k1s': [1, 1, 1, 1], 'k2s': [1, 1, 1, 1], 'maxpool': []},
'stage3': {'k1s': [1, 1, 1, 1], 'k2s': [1, 1, 1, 1], 'maxpool': []},
'use_bn': True,
......@@ -91,7 +91,31 @@ class ResNet(torch.nn.Module):
'attention_avg': None,
'stop_before_global_avg_pooling': None,
'use_check_point': None
}
},
'a_bit_smaller': {
'arch': 'cp_resnet',
'base_channels': base_channels,
'block_type': 'basic',
'depth': 26,
'input_shape': (1, *input_shape),
'multi_label': False,
'n_blocks_per_stage': [4, 1, 2],
'n_classes': num_outputs,
'prediction_threshold': 0.4,
'stage1': {'k1s': [3, 3, 3, 3], 'k2s': [1, 3, 1, 1], 'maxpool': [1, 2, 4]},
'stage2': {'k1s': [1, 1, 1, 1], 'k2s': [1, 1, 1, 1], 'maxpool': []},
'stage3': {'k1s': [1, 1, 1, 1], 'k2s': [1, 1, 1, 1], 'maxpool': []},
'use_bn': True,
'weight_init': 'fixup',
'pooling_padding': None,
'use_raw_spectograms': None,
'apply_softmax': None,
'n_channels': None,
'grow_a_lot': None,
'attention_avg': None,
'stop_before_global_avg_pooling': None,
'use_check_point': None
}
}
self.net = Network(configs[rf])
......
......@@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 32,
"outputs": [],
"source": [
"from pymongo import MongoClient\n",
......@@ -90,7 +90,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 33,
"outputs": [],
"source": [
"client = MongoClient('mongodb://student2.cp.jku.at:27017/')"
......@@ -105,12 +105,12 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 34,
"outputs": [],
"source": [
"# experiments_made = [r for r in client.dcase2020_2_madog.runs.find({\"experiment.name\": \"dcase2020_task2_BaselineExperiment\"})]\n",
"# experiments_baseline = [r for r in client.dcase2020_task2_ae_baseline_gridsearch.runs.find({\"experiment.name\": \"dcase2020_task2_BaselineExperiment\"})]\n",
"experiments_classification = [r for r in client.dcase2020_task2_complement_classification_gridsearch.runs.find({\"experiment.name\": \"dcase2020_task2_ClassificationExperiment\"})]\n",
"# experiments_classification = [r for r in client.dcase2020_task2_complement_classification_gridsearch.runs.find({\"experiment.name\": \"dcase2020_task2_ClassificationExperiment\"})]\n",
"experiments_conv_class = [r for r in client.dcase2020_task2_conv_complement_classification_gridsearch.runs.find({\"experiment.name\": \"dcase2020_task2_ClassificationExperiment\"})]"
],
"metadata": {
......@@ -123,7 +123,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 35,
"outputs": [],
"source": [
"# print(len(experiments_classification))\n",
......@@ -140,7 +140,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 36,
"outputs": [
{
"data": {
......@@ -148,7 +148,7 @@
},
"metadata": {},
"output_type": "execute_result",
"execution_count": 22
"execution_count": 36
}
],
"source": [
......@@ -170,7 +170,7 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 37,
"outputs": [],
"source": [
"n, m = get_record(baseline_both)\n",
......@@ -197,7 +197,7 @@
},
{
"cell_type": "code",
"execution_count": 24,
"execution_count": 38,
"outputs": [
{
"name": "stdout",
......
conda activate dcase2020_task2
./scripts/per_id_run.sh baseline_experiment 0 "debug=False num_hidden=2 hidden_size=128 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 1 "debug=False num_hidden=3 hidden_size=128 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 2 "debug=False num_hidden=2 hidden_size=128 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 3 "debug=False num_hidden=3 hidden_size=128 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 0 "debug=False num_hidden=2 hidden_size=256 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 1 "debug=False num_hidden=3 hidden_size=256 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 2 "debug=False num_hidden=2 hidden_size=256 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 3 "debug=False num_hidden=3 hidden_size=256 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 0 "debug=False num_hidden=2 hidden_size=512 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 1 "debug=False num_hidden=3 hidden_size=512 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 2 "debug=False num_hidden=2 hidden_size=512 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 3 "debug=False num_hidden=3 hidden_size=512 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 0 "debug=False num_hidden=2 hidden_size=1024 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 1 "debug=False num_hidden=3 hidden_size=1024 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 2 "debug=False num_hidden=2 hidden_size=1024 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 3 "debug=False num_hidden=3 hidden_size=1024 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 0 "debug=False num_hidden=2 hidden_size=128 latent_size=32 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 1 "debug=False num_hidden=3 hidden_size=128 latent_size=32 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 2 "debug=False num_hidden=2 hidden_size=128 latent_size=64 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 3 "debug=False num_hidden=3 hidden_size=128 latent_size=64 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 0 "debug=False num_hidden=2 hidden_size=256 latent_size=32 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 1 "debug=False num_hidden=3 hidden_size=256 latent_size=32 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 2 "debug=False num_hidden=2 hidden_size=256 latent_size=64 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 3 "debug=False num_hidden=3 hidden_size=256 latent_size=64 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 0 "debug=False num_hidden=2 hidden_size=512 latent_size=32 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 1 "debug=False num_hidden=3 hidden_size=512 latent_size=32 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 2 "debug=False num_hidden=2 hidden_size=512 latent_size=64 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 3 "debug=False num_hidden=3 hidden_size=512 latent_size=64 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 0 "debug=False num_hidden=2 hidden_size=1024 latent_size=32 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 1 "debug=False num_hidden=3 hidden_size=1024 latent_size=32 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 2 "debug=False num_hidden=2 hidden_size=1024 latent_size=64 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 3 "debug=False num_hidden=3 hidden_size=1024 latent_size=64 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
conda activate dcase2020_task2
./scripts/per_id_run.sh baseline_experiment 0 "debug=False num_hidden=2 hidden_size=128 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 1 "debug=False num_hidden=3 hidden_size=128 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 2 "debug=False num_hidden=2 hidden_size=128 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 3 "debug=False num_hidden=3 hidden_size=128 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 0 "debug=False num_hidden=2 hidden_size=256 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 1 "debug=False num_hidden=3 hidden_size=256 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 2 "debug=False num_hidden=2 hidden_size=256 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 3 "debug=False num_hidden=3 hidden_size=256 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 0 "debug=False num_hidden=2 hidden_size=512 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 1 "debug=False num_hidden=3 hidden_size=512 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 2 "debug=False num_hidden=2 hidden_size=512 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 3 "debug=False num_hidden=3 hidden_size=512 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 0 "debug=False num_hidden=2 hidden_size=1024 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 1 "debug=False num_hidden=3 hidden_size=1024 latent_size=8 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 2 "debug=False num_hidden=2 hidden_size=1024 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
./scripts/per_id_run.sh baseline_experiment 3 "debug=False num_hidden=3 hidden_size=1024 latent_size=16 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 0 "debug=False num_hidden=2 hidden_size=128 latent_size=32 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 1 "debug=False num_hidden=3 hidden_size=128 latent_size=32 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 2 "debug=False num_hidden=2 hidden_size=128 latent_size=64 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &
#./scripts/per_id_run.sh baseline_experiment 3 "debug=False num_hidden=3 hidden_size=128 latent_size=64 weight_decay=0 model_class=dcase2020_task2.models.AE reconstruction_class=dcase2020_task2.losses.MSEReconstruction -m student2.cp.jku.at:27017:dcase2020_task2_ae_baseline_gridsearch" &