Paul Primus / dcase2020_task2 / Commits / 838ce43f

Commit 838ce43f authored Jun 19, 2020 by Paul Primus
clean up

parent 4d7cc3e6
Changes: 7 files
dcase2020_task2/models/cp_resnet_bn.py (modified)
# coding: utf-8
# Author: Khaled Koutini
# Institut of Computational Perception
# LIT - Linz Institute of Technology,
# Johannes Kepler Universität
# https://github.com/kkoutini/cpjku_dcase19
import math
import torch
...
dcase2020_task2/models/other/__init__.py (deleted 100644 → 0)
dcase2020_task2/models/other/made.py (deleted 100644 → 0)
import torch
import numpy as np
from torch import nn as nn, distributions as D
from dcase2020_task2.models.custom import create_masks, MaskedLinear


class MADE(nn.Module):

    def __init__(
            self,
            input_shape,
            reconstruction,
            hidden_size=4096,
            num_hidden=4,
            activation='relu',
            cond_label_size=None,
            **kwargs
    ):
        """
        Args:
            TODO
        """
        super().__init__()

        self.input_shape = input_shape
        self.reconstruction = reconstruction

        input_degree = torch.arange(int(np.prod(input_shape))).view(input_shape).transpose(2, 1).reshape(-1)
        # input_degree = torch.arange(int(np.prod(input_shape)))

        # create masks; use natural order as input order
        masks, self.input_degrees = create_masks(
            int(np.prod(input_shape)),
            hidden_size,
            num_hidden,
            input_degrees=input_degree,
            input_order='sequential'
        )

        # set up activation
        if activation == 'relu':
            activation_fn = nn.ReLU()
        elif activation == 'tanh':
            activation_fn = nn.Tanh()
        else:
            raise ValueError('Check activation function.')

        # construct model
        self.input_layer = MaskedLinear(np.prod(input_shape), hidden_size, masks[0], cond_label_size)
        self.net = []
        for m in masks[1:-1]:
            self.net += [activation_fn, MaskedLinear(hidden_size, hidden_size, m)]
        self.net += [activation_fn, MaskedLinear(hidden_size, 2 * np.prod(input_shape), masks[-1].repeat(2, 1))]
        self.net = nn.Sequential(*self.net)

    def forward(self, batch):
        # MAF eq 4 -- return mean and log std
        batch_size = batch['observations'].shape[0]
        x = batch['observations'].reshape(batch_size, -1)
        y = batch.get('y', None)
        m, loga = self.net(self.input_layer(x, y)).chunk(chunks=2, dim=1)
        # u should be normally distributed under the model
        u = (x - m) * torch.exp(-loga)
        # MAF eq 5
        batch['u'] = u
        batch['log_abs_det_jacobian'] = -loga
        batch['reconstruction'] = m.reshape(batch_size, *self.input_shape)
        batch = self.reconstruction(batch)
        return batch

    def inverse(self, u, y=None, sum_log_abs_det_jacobians=None):
        # MAF eq 3
        x = torch.zeros_like(u)
        # run through the model once per dimension, filling x in degree order
        for i in self.input_degrees:
            m, loga = self.net(self.input_layer(x, y)).chunk(chunks=2, dim=1)
            x[:, i] = u[:, i] * torch.exp(loga[:, i]) + m[:, i]
        log_abs_det_jacobian = loga
        return x, log_abs_det_jacobian
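For orientation, forward and inverse above implement the standard masked-autoregressive-flow change of variables (the "MAF eq" comments refer to arXiv:1705.07057), with m as the predicted mean and loga as the predicted log standard deviation:

u_i = (x_i - m_i)\, e^{-\mathrm{loga}_i},
\qquad
x_i = u_i\, e^{\mathrm{loga}_i} + m_i,
\qquad
\log\left|\det \frac{\partial u}{\partial x}\right| = -\sum_i \mathrm{loga}_i

Because the masks force each m_i and loga_i to depend only on inputs with lower degree, the Jacobian is triangular, which is why its log-determinant reduces to the plain sum stored in batch['log_abs_det_jacobian'].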
dcase2020_task2/models/other/maf.py (deleted 100644 → 0)
"""
Masked Autoregressive Flow for Density Estimation
arXiv:1705.07057v4
"""
import
torch
import
torch.nn
as
nn
import
torch.distributions
as
D
import
matplotlib
from
dcase2020_task2.models
import
MADE
from
dcase2020_task2.models.custom
import
BatchNorm
,
\
FlowSequential
from
dcase2020_task2.models.other.made
import
MADEMOG
matplotlib
.
use
(
'Agg'
)
# --------------------
# Models
# --------------------
class
MAF
(
nn
.
Module
):
def
__init__
(
self
,
n_blocks
,
input_size
,
hidden_size
,
n_hidden
,
cond_label_size
=
None
,
activation
=
'relu'
,
input_order
=
'sequential'
,
batch_norm
=
True
):
super
().
__init__
()
# base distribution for calculation of log prob under the model
self
.
register_buffer
(
'base_dist_mean'
,
torch
.
zeros
(
input_size
))
self
.
register_buffer
(
'base_dist_var'
,
torch
.
ones
(
input_size
))
# construct model
modules
=
[]
self
.
input_degrees
=
None
for
i
in
range
(
n_blocks
):
modules
+=
[
MADE
(
input_size
,
hidden_size
,
n_hidden
,
cond_label_size
,
activation
,
input_order
,
self
.
input_degrees
)]
self
.
input_degrees
=
modules
[
-
1
].
input_degrees
.
flip
(
0
)
modules
+=
batch_norm
*
[
BatchNorm
(
input_size
)]
self
.
net
=
FlowSequential
(
*
modules
)
@
property
def
base_dist
(
self
):
return
D
.
Normal
(
self
.
base_dist_mean
,
self
.
base_dist_var
)
def
forward
(
self
,
x
,
y
=
None
):
return
self
.
net
(
x
,
y
)
def
inverse
(
self
,
u
,
y
=
None
):
return
self
.
net
.
inverse
(
u
,
y
)
def
log_prob
(
self
,
x
,
y
=
None
):
u
,
sum_log_abs_det_jacobians
=
self
.
forward
(
x
,
y
)
return
torch
.
sum
(
self
.
base_dist
.
log_prob
(
u
)
+
sum_log_abs_det_jacobians
,
dim
=
1
)
class
MAFMOG
(
nn
.
Module
):
""" MAF on mixture of gaussian MADE """
def
__init__
(
self
,
n_blocks
,
n_components
,
input_size
,
hidden_size
,
n_hidden
,
cond_label_size
=
None
,
activation
=
'relu'
,
input_order
=
'sequential'
,
batch_norm
=
True
):
super
().
__init__
()
# base distribution for calculation of log prob under the model
self
.
register_buffer
(
'base_dist_mean'
,
torch
.
zeros
(
input_size
))
self
.
register_buffer
(
'base_dist_var'
,
torch
.
ones
(
input_size
))
self
.
maf
=
MAF
(
n_blocks
,
input_size
,
hidden_size
,
n_hidden
,
cond_label_size
,
activation
,
input_order
,
batch_norm
)
# get reversed input order from the last layer (note in maf model, input_degrees are already flipped in for-loop model constructor
input_degrees
=
self
.
maf
.
input_degrees
# .flip(0)
self
.
mademog
=
MADEMOG
(
n_components
,
input_size
,
hidden_size
,
n_hidden
,
cond_label_size
,
activation
,
input_order
,
input_degrees
)
@
property
def
base_dist
(
self
):
return
D
.
Normal
(
self
.
base_dist_mean
,
self
.
base_dist_var
)
def
forward
(
self
,
x
,
y
=
None
):
u
,
maf_log_abs_dets
=
self
.
maf
(
x
,
y
)
u
,
made_log_abs_dets
=
self
.
mademog
(
u
,
y
)
sum_log_abs_det_jacobians
=
maf_log_abs_dets
.
unsqueeze
(
1
)
+
made_log_abs_dets
return
u
,
sum_log_abs_det_jacobians
def
inverse
(
self
,
u
,
y
=
None
):
x
,
made_log_abs_dets
=
self
.
mademog
.
inverse
(
u
,
y
)
x
,
maf_log_abs_dets
=
self
.
maf
.
inverse
(
x
,
y
)
sum_log_abs_det_jacobians
=
maf_log_abs_dets
.
unsqueeze
(
1
)
+
made_log_abs_dets
return
x
,
sum_log_abs_det_jacobians
def
log_prob
(
self
,
x
,
y
=
None
):
u
,
log_abs_det_jacobian
=
self
.
forward
(
x
,
y
)
# u = (N,C,L); log_abs_det_jacobian = (N,C,L)
# marginalize cluster probs
log_probs
=
torch
.
logsumexp
(
self
.
mademog
.
logr
+
self
.
base_dist
.
log_prob
(
u
)
+
log_abs_det_jacobian
,
dim
=
1
)
# out (N, L)
return
log_probs
.
sum
(
1
)
# out (N,)
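As a minimal usage sketch of the MAF class above: the hyperparameter values below are invented for illustration, and the upstream MADE, BatchNorm, and FlowSequential implementations from dcase2020_task2 are assumed to behave as the constructor expects.

# Hedged usage sketch: hyperparameters are illustrative, not taken from this repository.
import torch

flow = MAF(n_blocks=5, input_size=640, hidden_size=1024, n_hidden=1)

x = torch.randn(16, 640)         # a batch of 16 flattened observations
nll = -flow.log_prob(x).mean()   # negative log-likelihood training objective
nll.backward()                   # the whole flow is differentiable

MAFMOG.log_prob differs only in the final step: because its MADEMOG head is a mixture, it marginalizes over the mixture components with torch.logsumexp(..., dim=1) before summing over features.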
dcase2020_task2/models/other/real_nvp.py (deleted 100644 → 0)
import torch
from torch import nn as nn, distributions as D
from dcase2020_task2.models.custom import LinearMaskedCoupling, BatchNorm, FlowSequential


class RealNVP(nn.Module):

    def __init__(self, n_blocks, input_size, hidden_size, n_hidden, cond_label_size=None, batch_norm=True):
        super().__init__()

        # base distribution for calculation of log prob under the model
        self.register_buffer('base_dist_mean', torch.zeros(input_size))
        self.register_buffer('base_dist_var', torch.ones(input_size))

        # construct model
        modules = []
        mask = torch.arange(input_size).float() % 2
        for i in range(n_blocks):
            modules += [LinearMaskedCoupling(input_size, hidden_size, n_hidden, mask, cond_label_size)]
            mask = 1 - mask
            modules += batch_norm * [BatchNorm(input_size)]

        self.net = FlowSequential(*modules)

    @property
    def base_dist(self):
        return D.Normal(self.base_dist_mean, self.base_dist_var)

    def forward(self, x, y=None):
        return self.net(x, y)

    def inverse(self, u, y=None):
        return self.net.inverse(u, y)

    def log_prob(self, x, y=None):
        u, sum_log_abs_det_jacobians = self.forward(x, y)
        return torch.sum(self.base_dist.log_prob(u) + sum_log_abs_det_jacobians, dim=1)
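Two small idioms in the constructors above are worth spelling out. First, modules += batch_norm * [BatchNorm(input_size)] multiplies a one-element list by a boolean, so the BatchNorm layer is appended only when batch_norm is True. Second, mask = 1 - mask flips the coupling mask between blocks, so dimensions left untouched by one coupling layer are transformed by the next. A standalone sketch of the mask schedule (input_size is illustrative):

# Standalone sketch of RealNVP's alternating coupling masks.
import torch

input_size = 6  # illustrative; any feature dimensionality works
mask = torch.arange(input_size).float() % 2   # [0., 1., 0., 1., 0., 1.]
for block in range(3):
    print(block, mask.tolist())               # the 0/1 pattern flips every block
    mask = 1 - mask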
notebooks/create_bar_plot.ipynb (deleted 100644 → 0; diff collapsed)
notebooks/top_10_auc.png (deleted 100644 → 0; 31 KB)