Commit 8d0d8559 authored by Paul Primus's avatar Paul Primus
Browse files

add RK3 training script

parent fd3d3b7f
......@@ -52,6 +52,7 @@ class ComplementMCMDataSet(BaseDataSet):
training_set = MachineDataSet(machine_type, machine_id, mode='training', **kwargs)
validation_set = MachineDataSet(machine_type, machine_id, mode='validation', **kwargs)
if normalize is None:
mean = training_set.data.mean(axis=1, keepdims=True)
std = training_set.data.std(axis=1, keepdims=True)
......@@ -69,7 +70,7 @@ class ComplementMCMDataSet(BaseDataSet):
for type_ in TRAINING_ID_MAP:
for id_ in TRAINING_ID_MAP[type_]:
if type_ != machine_type or id_ != machine_id:
if type_ != machine_type:
t = MachineDataSet(type_, id_, mode='training', **kwargs)
t.data = (t.data - mean) / std
......
......@@ -134,14 +134,14 @@ def configuration():
seed = 1220
deterministic = False
id = datetime.now().strftime("%Y-%m-%d_%H:%M:%S")
log_path = os.path.join('..', 'experiment_logs', id)
log_path = os.path.join('experiment_logs', id)
#####################
# quick configuration, uses default parameters of more detailed configuration
#####################
machine_type = 0
machine_id = 0
machine_id = 2
num_mel = 128
n_fft = 1024
......@@ -150,10 +150,10 @@ def configuration():
fmin = 0
context = 5
model_class = 'models.FCNN'
hidden_size = 512
model_class = 'dcase2020_task2.models.FCNN'
hidden_size = 256
num_hidden = 3
latent_size = 64
dropout_probability = 0.0
debug = False
if debug:
......@@ -163,9 +163,9 @@ def configuration():
epochs = 100
loss_class = 'dcase2020_task2.losses.BCE'
batch_size = 512
learning_rate = 1e-3
weight_decay = 1e-5
batch_size = 8192
learning_rate = 1e-4
weight_decay = 0
normalize_raw = True
......@@ -174,7 +174,7 @@ def configuration():
model_class,
hidden_size,
num_hidden,
latent_size,
dropout_probability,
batch_size,
learning_rate,
weight_decay,
......@@ -241,7 +241,12 @@ def configuration():
'class': model_class,
'args': [
'@data_set.observation_shape'
]
],
'kwargs': {
'hidden_size': hidden_size,
'num_hidden': num_hidden,
'dropout_probability': dropout_probability
}
}
lr_scheduler = {
......@@ -250,8 +255,8 @@ def configuration():
'@optimizer',
],
'kwargs': {
'step_size': 25,
'gamma': 0.3
'step_size': 50,
'gamma': 0.1
}
}
......@@ -269,7 +274,7 @@ def configuration():
}
trainer = {
'class': 'trainers.PTLTrainer',
'class': 'dcase2020_task2.trainers.PTLTrainer',
'kwargs': {
'max_epochs': epochs,
'checkpoint_callback': False,
......
......@@ -29,6 +29,6 @@ class BCE(BaseLoss):
batch_normal['normal_scores_mean'] = normal_scores.mean()
batch_normal['normal_scores_std'] = normal_scores.std()
batch_normal['abnormal_scores_mean'] = abnormal_scores.mean()
batch_normal['abnormal_scores_std'] = normal_scores.std()
batch_normal['abnormal_scores_std'] = abnormal_scores.std()
return batch_normal
......@@ -16,22 +16,26 @@ class FCNN(torch.nn.Module):
num_hidden=3,
num_outputs=1,
activation='relu',
batch_norm=False
batch_norm=False,
dropout_probability=0.1
):
super().__init__()
activation_fn = activation_dict[activation]
self.input_shape = input_shape
sizes = [np.prod(input_shape)] + [hidden_size // (2**l) for l in range(num_hidden)] + [num_outputs]
sizes = [np.prod(input_shape)] + [hidden_size for _ in range(num_hidden)] + [num_outputs]
layers = []
for i, o in zip(sizes[:-1], sizes[1:]):
layers.append(torch.nn.Linear(i, o))
if batch_norm:
layers.append(torch.nn.BatchNorm1d(o))
layers.append(torch.nn.Dropout(p=dropout_probability))
layers.append(activation_fn())
_ = layers.pop()
_ = layers.pop()
if batch_norm:
_ = layers.pop()
......
This diff is collapsed.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment