# Generate your label in the following way to avoid overlapping codes among submissions:
# [Last name of corresponding author]_[Abbreviation of institute of the corresponding author]_task[task number]_[index number of your submission (1-4)]
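# For illustration (hypothetical author and institute, not part of this submission): a corresponding author named Doe at institute ABC, submitting their third entry to task 2, would use the label Doe_ABC_task2_3.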
label: Primus_JKU_task2_1
# Submission name
# This name will be used in the results tables when space permits.
name: Outlier Exposed Convolutional Classifier
# Submission name abbreviated
# This abbreviated name will be used in the results tables when space is tight.
# Use a maximum of 10 characters.
abbreviation: OECC
# Authors of the submitted system.
# List authors in the order you want them to appear in submission lists.
# One of the authors has to be marked as the corresponding author; this author will be listed next to the submission in the results tables.
authors:
  # First author
  - lastname: Primus
    firstname: Paul
    email: paul.primus@jku.at    # Contact email address
    corresponding: true          # Mark true for one of the authors
    # Affiliation information for the author
    affiliation:
      institution: JKU
      department: Computational Perception
      location: Linz, Austria
# System information
system:
  # System description; the metadata provided here will be used for a meta-analysis of the submitted systems.
  # Use general-level tags; when possible, use the tags provided in the comments.
  # If an information field is not applicable to the system, use "!!null".
  description:
    # Audio input
    # Please specify all sampling rates (comma-separated list).
    # e.g. 16kHz, 22.05kHz, 44.1kHz
    input_sampling_rate: 16kHz
    # Data augmentation methods
    # Please specify all methods used (comma-separated list).
    # e.g. mixup, time stretching, block mixing, pitch shifting, ...
    data_augmentation: !!null
    # Front-end (preprocessing) methods
    # Please specify all methods used (comma-separated list).
    # e.g. HPSS, WPE, NMF, NN filter, RPCA, ...
    front_end: !!null
    # Acoustic representation
    # One or multiple labels, e.g. MFCC, log-mel energies, spectrogram, CQT, raw waveform, ...
    acoustic_features: log-mel energies
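    # One common way to extract such features (a minimal sketch, not the exact settings of this
    # system; frame size, hop size, and mel-band count are assumptions):
    #
    #   import librosa, numpy as np
    #   y, sr = librosa.load(wav_path, sr=16000)                     # load audio at 16 kHz
    #   mel = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=1024,
    #                                        hop_length=512, n_mels=128)
    #   log_mel = np.log(mel + 1e-10)                                # log-mel energies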
    # Embeddings
    # Please specify all embeddings used (comma-separated list).
    # One or multiple, e.g. VGGish, OpenL3, ...
    embeddings: !!null
    # Machine learning
    # In case of ensemble methods, please specify all methods used (comma-separated list).
    # e.g. AE, VAE, GAN, GMM, k-means, OCSVM, normalizing flow, CNN, LSTM, random forest, ensemble, ...
    machine_learning_method: CNN
    # Method for aggregating predictions over time
    # Please specify all methods used (comma-separated list).
    # e.g. average, median, maximum, minimum, ...
    aggregation_method: average
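    # Aggregation sketch (an illustration, not taken from this file; `frame_scores` is a
    # hypothetical 1-D array of per-frame anomaly scores for one clip):
    #
    #   import numpy as np
    #   clip_score = np.mean(frame_scores)   # averaging over time yields the clip-level score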
    # Ensemble method subsystem count
    # In case an ensemble method is not used, mark !!null.
    # e.g. 2, 3, 4, 5, ...
    ensemble_method_subsystem_count: !!null
    # Decision making in the ensemble
    # e.g. average, median, maximum, minimum, ...
    decision_making: !!null
    # External data usage method
    # Please specify all usages (comma-separated list).
    # e.g. simulation of anomalous samples, embeddings, pre-trained model, ...
    external_data_usage: !!null
    # Usage of the development dataset
    # Please specify all usages (comma-separated list).
    # e.g. development, pre-training, fine-tuning
    development_data_usage: development
  # System complexity; the metadata provided here may be used to evaluate submitted systems from a computational-load perspective.
  complexity:
    # Total number of parameters used in the acoustic model.
    # For neural networks, this information is usually given in the network summary before training.
    # For methods other than neural networks, if the parameter count is not directly available, estimate it as accurately as possible.
    # In case of ensemble approaches, add up the parameters of all subsystems.
    # In case embeddings are used, add up the parameter counts of the embedding extraction network(s) and the classification network.
    # Use a numerical value.
    total_parameters: 269992
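    # How such a count can be obtained (a minimal sketch, assuming a PyTorch model object
    # `model`; for ensembles, sum this over all subsystems, and add the embedding extractor's
    # count if embeddings are used):
    #
    #   total_parameters = sum(p.numel() for p in model.parameters())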
  # List of external datasets used in the submission.
  # The development dataset is listed here only as an example; list only external datasets.
  external_datasets:
    # Dataset name
    - name: DCASE 2020 Challenge Task 2 Development Dataset
      # Dataset access URL
      url: https://zenodo.org/record/3678171
  # URL to the source code of the system [optional, highly recommended]
  # Reproducibility will be used to evaluate submitted systems.