Commit d2a228f9 authored by Shreyan Chowdhury's avatar Shreyan Chowdhury

Merge branch 'master' of gitlab.cp.jku.at:shreyan/moodwalk

parents 9a6ce41e 62e525c6
from utils import CURR_RUN_PATH, USE_GPU, logger
from pytorch_lightning import Trainer
from test_tube import Experiment
from models.baseline import CNN as Network
def run():
    logger.info(CURR_RUN_PATH)
    exp = Experiment(save_dir=CURR_RUN_PATH)
    # TODO fill in their training parameters
    if USE_GPU:
        trainer = Trainer(gpus=[0], distributed_backend='ddp',
                          experiment=exp, max_nb_epochs=10, train_percent_check=1.0,
                          fast_dev_run=False)
    else:
        trainer = Trainer(experiment=exp, max_nb_epochs=1, train_percent_check=0.1,
                          fast_dev_run=True)
    model = Network()  # TODO num_class
    print(model)
    trainer.fit(model)
    trainer.test()


if __name__ == '__main__':
    run()
\ No newline at end of file
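A note on the `# TODO num_class` above: CNN.__init__ below requires num_class, so this call will fail as written. A minimal sketch of the intended construction, where NUM_CLASSES is only a placeholder for the dataset's actual tag count (not defined in this commit):

NUM_CLASSES = 8  # placeholder only; the real tag count is not set anywhere in this commit
model = Network(num_class=NUM_CLASSES)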
@@ -82,7 +82,7 @@ def ERF_generate(model, loader):
     counter = 0
     accum = None
-    for step, (data, _, targets) in enumerate(loader):
+    for step, (data, targets) in enumerate(loader):
         data = data.cuda()
         data.requires_grad = True
         outputs = model(data)
@@ -128,6 +128,9 @@ def run():
    trainer.fit(model)
    save(model, "last_model.pth")
    dataset = MelSpecDataset(phase='test', ann_root=PATH_ANNOTATIONS, spec_root=PATH_MELSPEC_DOWNLOADED_FRAMED)
    test_loader = DataLoader(dataset=dataset,
                             batch_size=32,
......
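The added MelSpecDataset/test DataLoader presumably feeds an evaluation pass; a hedged sketch of how it could be consumed with the roc_auc_score import used by the model files (the loop, device handling, and averaging choice are assumptions, not part of this diff):

import numpy as np
from sklearn.metrics import roc_auc_score

model.eval()
scores, targets = [], []
with torch.no_grad():
    for x, y in test_loader:                    # assumes the loader yields (spectrogram, tags) pairs on the same device as the model
        scores.append(model(x).cpu().numpy())   # forward() already ends in a sigmoid
        targets.append(y.cpu().numpy())
print('test ROC-AUC:', roc_auc_score(np.vstack(targets), np.vstack(scores), average='macro'))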
from utils import CURR_RUN_PATH, USE_GPU, logger
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import EarlyStopping
from test_tube import Experiment
from models.resnet18 import Network
def run():
    logger.info(CURR_RUN_PATH)
    exp = Experiment(save_dir=CURR_RUN_PATH)
    # TODO other training parameters?
    # callbacks
    early_stop = EarlyStopping(
        monitor='val_loss',  # TODO: check if this exists
        patience=50,
        verbose=True,
        mode='min'  # TODO: check if correct
    )
    if USE_GPU:
        trainer = Trainer(gpus=[0], distributed_backend='ddp',
                          experiment=exp, max_nb_epochs=500, train_percent_check=1.0,
                          fast_dev_run=False, early_stop_callback=early_stop)
    else:
        trainer = Trainer(experiment=exp, max_nb_epochs=1, train_percent_check=0.1,
                          fast_dev_run=True)
    model = Network()  # TODO num_tags
    print(model)
    trainer.fit(model)
    # TODO log testing results
    trainer.test()


if __name__ == '__main__':
    run()
\ No newline at end of file
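On the `monitor='val_loss'` TODO above: early stopping can only track a key that the LightningModule actually returns from its validation hooks in this test_tube-era API. A hedged sketch of what would need to exist in the model for that monitor to work (method bodies are assumptions, not code from this commit):

    # Assumed validation hooks so that 'val_loss' exists for EarlyStopping to monitor.
    def validation_step(self, data_batch, batch_nb):
        x, y = data_batch
        y_hat = self.forward(x)
        return {'val_loss': self.my_loss(y_hat, y.float())}

    def validation_end(self, outputs):
        avg_loss = torch.stack([out['val_loss'] for out in outputs]).mean()
        return {'val_loss': avg_loss}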
from utils import *
from datasets import MelSpecDataset
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from sklearn.metrics import roc_auc_score
# TODO pr-auc
# TODO f1-score
class CNN(pl.LightningModule):
    def __init__(self, num_class):
        super(CNN, self).__init__()

        # init bn
        self.bn_init = nn.BatchNorm2d(1)

        # layer 1
        self.conv_1 = nn.Conv2d(1, 64, 3, padding=1)
        self.bn_1 = nn.BatchNorm2d(64)
        self.mp_1 = nn.MaxPool2d((2, 4))

        # layer 2
        self.conv_2 = nn.Conv2d(64, 128, 3, padding=1)
        self.bn_2 = nn.BatchNorm2d(128)
        self.mp_2 = nn.MaxPool2d((2, 4))

        # layer 3
        self.conv_3 = nn.Conv2d(128, 128, 3, padding=1)
        self.bn_3 = nn.BatchNorm2d(128)
        self.mp_3 = nn.MaxPool2d((2, 4))

        # layer 4
        self.conv_4 = nn.Conv2d(128, 128, 3, padding=1)
        self.bn_4 = nn.BatchNorm2d(128)
        self.mp_4 = nn.MaxPool2d((3, 5))

        # layer 5
        self.conv_5 = nn.Conv2d(128, 64, 3, padding=1)
        self.bn_5 = nn.BatchNorm2d(64)
        self.mp_5 = nn.MaxPool2d((4, 4))

        # classifier
        self.dense = nn.Linear(64, num_class)
        self.dropout = nn.Dropout(0.5)

    def forward(self, x):
        x = x.unsqueeze(1)

        # init bn
        x = self.bn_init(x)

        # layer 1
        x = self.mp_1(nn.ELU()(self.bn_1(self.conv_1(x))))

        # layer 2
        x = self.mp_2(nn.ELU()(self.bn_2(self.conv_2(x))))

        # layer 3
        x = self.mp_3(nn.ELU()(self.bn_3(self.conv_3(x))))

        # layer 4
        x = self.mp_4(nn.ELU()(self.bn_4(self.conv_4(x))))

        # layer 5
        x = self.mp_5(nn.ELU()(self.bn_5(self.conv_5(x))))

        # classifier
        x = x.view(x.size(0), -1)
        x = self.dropout(x)
        logit = nn.Sigmoid()(self.dense(x))

        return logit

    def my_loss(self, y_hat, y):
        return F.binary_cross_entropy(y_hat, y)

    def configure_optimizers(self):
        return [torch.optim.Adam(self.parameters(), lr=1e-4)]  # from their code
\ No newline at end of file
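This excerpt defines my_loss and configure_optimizers but shows no training_step, which Trainer.fit() calls on every batch. A minimal sketch of the missing hook, assuming batches of (spectrogram, multi-hot tags) — an assumption, not part of this commit:

    # Assumed minimal training hook; Lightning calls this once per batch.
    def training_step(self, data_batch, batch_nb):
        x, y = data_batch
        y_hat = self.forward(x)
        return {'loss': self.my_loss(y_hat, y.float())}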
from utils import *
from datasets import MelSpecDataset
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
import pytorch_lightning as pl
from sklearn.metrics import roc_auc_score
# TODO pr-auc
# TODO f1-score
from torchvision.models import resnet18
class Network(pl.LightningModule):
    def __init__(self, num_tags):
        super(Network, self).__init__()
        self.num_tags = num_tags
        self.model = resnet18(pretrained=False)  # TODO: need to check if optimizer recognizes these parameters
        num_features = self.model.fc.in_features
        self.model.fc = nn.Linear(num_features, self.num_tags)  # overwriting fc layer
        self.sig = nn.Sigmoid()  # nn.Sigmoid takes no arguments

    def forward(self, x):
        x = self.model(x)
        x = self.sig(x)
        return x

    def my_loss(self, y_hat, y):
        return F.binary_cross_entropy(y_hat, y)

    def configure_optimizers(self):
        return [torch.optim.Adam(self.parameters(), lr=0.001)]
\ No newline at end of file
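One caveat with this wrapper: torchvision's resnet18 expects 3-channel input, while the mel-spectrogram batches used elsewhere in this repo look single-channel. A hedged adaptation of forward (the input shape is an assumption, not something this commit establishes):

    # Assumed shape handling: add a channel axis if needed and repeat it to 3
    # channels so the input matches resnet18's first convolution.
    def forward(self, x):
        if x.dim() == 3:              # (batch, freq, time) -> (batch, 1, freq, time)
            x = x.unsqueeze(1)
        if x.size(1) == 1:
            x = x.repeat(1, 3, 1, 1)
        x = self.model(x)
        return self.sig(x)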
@@ -114,6 +114,12 @@ class MultiTagger(pl.LightningModule):
        return yy/frames_to_process

    def training_step(self, data_batch, batch_nb):
        # TODO: this is not doing a backward pass?? something like this:
        # self.optimizer.zero_grad()
        # loss.backward()
        # self.optimizer.step()
        x, y = data_batch
        y_hat = self.forward_full_song(x, y)
        y = y.float()
@@ -136,7 +142,7 @@ class MultiTagger(pl.LightningModule):
                'rocauc': avg_auc}

    def configure_optimizers(self):
-        return [torch.optim.Adam(self.parameters(), lr=0.02)]
+        return [torch.optim.Adam(self.parameters(), lr=0.02)]  # TODO: isn't that lr too big?

    @pl.data_loader
    def tng_dataloader(self):
......
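On the backward-pass TODO in training_step above: PyTorch Lightning's Trainer performs zero_grad, backward, and optimizer.step itself, driven by the loss that training_step returns, so no manual optimizer calls are needed there. The step only has to end with something like the following (return format assumed from the usual Lightning convention, and assuming MultiTagger exposes a loss like the my_loss used by the other modules in this repo):

        # Lightning handles backward() and optimizer.step() from the returned loss.
        loss = self.my_loss(y_hat, y)
        return {'loss': loss}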
@@ -240,6 +240,13 @@ def preprocess_specs(source_root, destination_root, frame_length=256, hop=1.0):
    np.save(destination, framed_spec)


def save(model, path):
    try:
        torch.save(model.module.state_dict(), path)
    except AttributeError:
        torch.save(model.state_dict(), path)


if __name__ == '__main__':
    # TESTS
......
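The try/except in save() covers models wrapped by DataParallel/DistributedDataParallel, which expose the underlying network as .module. A hedged counterpart for loading the checkpoint back into an unwrapped model (class and tag count are placeholders, not part of the diff):

# Assumed reload sketch: rebuild the bare model and load the unwrapped state dict.
model = Network(num_tags=NUM_TAGS)                    # NUM_TAGS is a placeholder
model.load_state_dict(torch.load("last_model.pth"))
model.eval()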