Commit 0114d182 authored by Richard Vogl's avatar Richard Vogl
Browse files

model changes

parent 14d30ff5
......@@ -16,13 +16,14 @@ import theano.tensor as T
import datetime
import lasagne
from piano_transcription.utils import print_net_architecture, select_model, collect_inputs
from piano_transcription.utils import print_net_architecture, select_model, collect_inputs, BColors
from piano_transcription.data import load_data
from piano_transcription.data.data_pools import UniversalRegressionDataPool
from piano_transcription import OUTPUT_PATH
THRESHOLD = 0.5
MAX_NUMEPOCHS = 10000
col = BColors()
def run(model, model_name, learn_rate, batch_size, split, k_samples):
......@@ -86,7 +87,8 @@ def run(model, model_name, learn_rate, batch_size, split, k_samples):
max_refinements = 3
refinements = max_refinements
cur_patience = patience
new_lr = learn_rate
# Finally, launch the training loop.
for epoch in range(MAX_NUMEPOCHS):
# In each epoch, we do a full pass over the training data:
......@@ -116,7 +118,8 @@ def run(model, model_name, learn_rate, batch_size, split, k_samples):
print("\rEpoch %3d of %d took %1.3f s (valid: %1.3f s) -- patience: %d " %
(epoch + 1, MAX_NUMEPOCHS, time.time() - start_time, time.time() - valid_start_time, cur_patience))
error = train_loss_sum / train_batches
print(" training loss: %1.3f valid loss: %1.3f" % (error, valid_loss[epoch]))
print((" training loss: %1.3f "+col.print_colored("valid loss: %1.3f", BColors.HEADER)+" @ lr %1.6f") %
(error, valid_loss[epoch], new_lr))
better = valid_loss[epoch] < valid_loss[best_valid_loss_epoch]
if epoch == 0 or better:
best_valid_loss_epoch = epoch
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment