Commit d78e73b0 authored by Paul Primus

add micro seconds to experiment name

parent cdde06a0
@@ -162,5 +162,89 @@
|valve | 5 |4 |0.6561 |-0.0869 |0.5175 |-0.0022 |
|valve | 5 |6 |0.7242 |+0.1852 |0.5417 |+0.0574 |
## Spec normalized per_mic, Raw normalized
## raw normalized, spec normalized per_mic, complement all
| Machine | Type | ID | AUC | to BL | pAUC | to BL |
| ------- | :--- | :--- | ---------- | ---------- | ---------- | ---------- |
|fan | 0 |0 |0.6683 |+0.1242 |0.6060 |+0.1123 |
|fan | 0 |2 |0.9883 |+0.2543 |0.9456 |+0.3975 |
|fan | 0 |4 |0.7859 |+0.1698 |0.6636 |+0.1310 |
|fan | 0 |6 |0.7462 |+0.0070 |0.8455 |+0.3220 |
|pump | 1 |0 |0.8785 |+0.2070 |0.7284 |+0.1610 |
|pump | 1 |2 |0.5635 |-0.0518 |0.5462 |-0.0348 |
|pump | 1 |4 |0.9995 |+0.1162 |0.9974 |+0.3264 |
|pump | 1 |6 |0.9560 |+0.2105 |0.8653 |+0.2851 |
|slider | 2 |0 |0.9679 |+0.0060 |0.8743 |+0.0599 |
|slider | 2 |2 |0.8813 |+0.0916 |0.6339 |-0.0029 |
|slider | 2 |4 |0.9905 |+0.0475 |0.9500 |+0.2302 |
|slider | 2 |6 |0.8336 |+0.1377 |0.5577 |+0.0675 |
|ToyCar | 3 |1 |0.6665 |-0.1471 |0.6647 |-0.0193 |
|ToyCar | 3 |2 |0.9024 |+0.0427 |0.8195 |+0.0423 |
|ToyCar | 3 |3 |0.9814 |+0.3484 |0.9271 |+0.3750 |
|ToyCar | 3 |4 |0.9982 |+0.1537 |0.9904 |+0.3007 |
|ToyConveyor | 4 |1 |0.8454 |+0.0647 |0.7466 |+0.1041 |
|ToyConveyor | 4 |2 |0.6047 |-0.0369 |0.5448 |-0.0153 |
|ToyConveyor | 4 |3 |0.7071 |-0.0464 |0.5888 |-0.0215 |
|valve | 5 |0 |0.9876 |+0.3000 |0.9518 |+0.4348 |
|valve | 5 |2 |0.7835 |+0.1017 |0.5197 |+0.0014 |
|valve | 5 |4 |0.7010 |-0.0420 |0.5289 |+0.0092 |
|valve | 5 |6 |0.8042 |+0.2652 |0.5425 |+0.0582 |
## raw normalized, spec unnormalized, complement all
| Machine | Type | ID | AUC | to BL | pAUC | to BL |
| ------- | :--- | :--- | ---------- | ---------- | ---------- | ---------- |
|fan | 0 |0 |0.5857 |+0.0416 |0.5846 |+0.0909 |
|fan | 0 |2 |0.9865 |+0.2525 |0.9315 |+0.3834 |
|fan | 0 |4 |0.7753 |+0.1592 |0.6568 |+0.1242 |
|fan | 0 |6 |0.7520 |+0.0128 |0.8455 |+0.3220 |
|pump | 1 |0 |0.8373 |+0.1658 |0.7126 |+0.1452 |
|pump | 1 |2 |0.5577 |-0.0576 |0.5225 |-0.0585 |
|pump | 1 |4 |0.9995 |+0.1162 |0.9974 |+0.3264 |
|pump | 1 |6 |0.9311 |+0.1856 |0.8354 |+0.2552 |
|slider | 2 |0 |0.9946 |+0.0327 |0.9821 |+0.1677 |
|slider | 2 |2 |0.7940 |+0.0043 |0.5494 |-0.0874 |
|slider | 2 |4 |0.9953 |+0.0523 |0.9752 |+0.2554 |
|slider | 2 |6 |0.8202 |+0.1243 |0.5795 |+0.0893 |
|ToyCar | 3 |1 |0.6782 |-0.1354 |0.6485 |-0.0355 |
|ToyCar | 3 |2 |0.8749 |+0.0152 |0.7847 |+0.0075 |
|ToyCar | 3 |3 |0.9746 |+0.3416 |0.9001 |+0.3480 |
|ToyCar | 3 |4 |0.0000 |-0.8445 |0.0000 |-0.6897 |
|ToyCar | 3 |4 |0.9975 |+0.1530 |0.9867 |+0.2970 |
|ToyConveyor | 4 |1 |0.8521 |+0.0714 |0.7509 |+0.1084 |
|ToyConveyor | 4 |2 |0.5919 |-0.0497 |0.5407 |-0.0194 |
|ToyConveyor | 4 |3 |0.7277 |-0.0258 |0.6033 |-0.0070 |
|valve | 5 |0 |0.9838 |+0.2962 |0.9288 |+0.4118 |
|valve | 5 |2 |0.7559 |+0.0741 |0.5048 |-0.0135 |
|valve | 5 |4 |0.7259 |-0.0171 |0.5373 |+0.0176 |
|valve | 5 |6 |0.7828 |+0.2438 |0.5623 |+0.0780 |
## raw unnormalized, spec unnormalized all, complement all
| Machine | Type | ID | AUC | to BL | pAUC | to BL |
| ------- | :--- | :--- | ---------- | ---------- | ---------- | ---------- |
|fan | 0 |0 |0.4166 |-0.1275 |0.5182 |+0.0245 |
|fan | 0 |2 |0.9741 |+0.2401 |0.8962 |+0.3481 |
|fan | 0 |4 |0.6807 |+0.0646 |0.6375 |+0.1049 |
|fan | 0 |6 |0.7403 |+0.0011 |0.8455 |+0.3220 |
|pump | 1 |0 |0.7201 |+0.0486 |0.6802 |+0.1128 |
|pump | 1 |2 |0.5097 |-0.1056 |0.5126 |-0.0684 |
|pump | 1 |4 |0.9999 |+0.1166 |0.9995 |+0.3285 |
|pump | 1 |6 |0.8119 |+0.0664 |0.7198 |+0.1396 |
|slider | 2 |0 |0.9993 |+0.0374 |0.9963 |+0.1819 |
|slider | 2 |2 |0.7545 |-0.0352 |0.5470 |-0.0898 |
|slider | 2 |4 |0.9926 |+0.0496 |0.9826 |+0.2628 |
|slider | 2 |6 |0.8804 |+0.1845 |0.6594 |+0.1692 |
|ToyCar | 3 |1 |0.6403 |-0.1733 |0.6499 |-0.0341 |
|ToyCar | 3 |2 |0.7595 |-0.1002 |0.6391 |-0.1381 |
|ToyCar | 3 |3 |0.9774 |+0.3444 |0.9087 |+0.3566 |
|ToyCar | 3 |4 |0.9914 |+0.1469 |0.9558 |+0.2661 |
|ToyConveyor | 4 |1 |0.8422 |+0.0615 |0.7263 |+0.0838 |
|ToyConveyor | 4 |2 |0.5798 |-0.0618 |0.5287 |-0.0314 |
|ToyConveyor | 4 |3 |0.7005 |-0.0530 |0.5779 |-0.0324 |
|valve | 5 |0 |0.7336 |+0.0460 |0.5387 |+0.0217 |
|valve | 5 |2 |0.7018 |+0.0200 |0.5338 |+0.0155 |
|valve | 5 |4 |0.6526 |-0.0904 |0.5092 |-0.0105 |
|valve | 5 |6 |0.8994 |+0.3604 |0.7158 |+0.2315 |
@@ -15,8 +15,8 @@ def configuration():
     descriptor = None
-    machine_type = 0
-    machine_id = 0
+    machine_type = 3
+    machine_id = 1
     batch_size = 512
......
@@ -5,6 +5,7 @@ import data_sets
 import librosa
 import sys
 import numpy as np
+import random
 CLASS_MAP = {
     'fan': 0,
@@ -195,6 +196,30 @@ class MCMDataSet(data_sets.BaseDataSet):
         return torch.utils.data.ConcatDataset(complement_sets)
+
+
+class MixUpDataSet(torch.utils.data.Dataset):
+
+    def __init__(
+            self,
+            data_sets
+    ):
+        self.data_sets = data_sets
+
+    def __getitem__(self, item):
+        ds_1 = self.data_sets[np.random.random_integers(0, len(self.data_sets)-1)]
+        sample_1 = ds_1[np.random.random_integers(0, len(ds_1)-1)]
+        ds_2 = self.data_sets[np.random.random_integers(0, len(self.data_sets)-1)]
+        sample_2 = ds_2[np.random.random_integers(0, len(ds_2)-1)]
+        l = np.random.beta(1, 1, size=1).astype(np.float32)
+        sample_1['observations'] = l * sample_1['observations'] + (1 - l) * sample_2['observations']
+        return sample_1
+
+    def __len__(self):
+        return 20000
+
+
 class MachineDataSet(torch.utils.data.Dataset):
 
     def __init__(
......
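The new `MixUpDataSet` picks two of the wrapped data sets at random, draws one sample from each, and blends their `observations` with a Beta(1, 1) (i.e. uniform) weight; its length is fixed at 20000 virtual samples per epoch. Note that `np.random.random_integers` has long been deprecated in NumPy; `np.random.randint(0, len(ds))` is the drop-in replacement. A minimal usage sketch with a stand-in data set (the `_DummySet` class below is only for illustration and does not exist in the repository):

``` python
import numpy as np
import torch

class _DummySet(torch.utils.data.Dataset):
    """Stand-in for MachineDataSet: returns dicts with an 'observations' array."""
    def __init__(self, n=100, shape=(1, 128, 32)):
        self.n, self.shape = n, shape

    def __getitem__(self, item):
        return {'observations': np.random.randn(*self.shape).astype(np.float32)}

    def __len__(self):
        return self.n

# Mix samples drawn from two (here: dummy) machine data sets.
mixup_set = MixUpDataSet([_DummySet(), _DummySet()])
loader = torch.utils.data.DataLoader(mixup_set, batch_size=8)
batch = next(iter(loader))
# Each 'observations' entry is a convex combination of two randomly drawn samples.
print(batch['observations'].shape)  # torch.Size([8, 1, 128, 32])
```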
@@ -49,18 +49,6 @@ class ClassifiactionExperiment(pl.LightningModule, BaseExperiment):
             )
         )
-        self.inf_training_iterator = iter(
-            self.get_infinite_data_loader(
-                torch.utils.data.DataLoader(
-                    self.data_set.training_data_set(self.machine_type, self.machine_id),
-                    batch_size=self.objects['batch_size'],
-                    shuffle=True,
-                    num_workers=self.objects['num_workers'],
-                    drop_last=True
-                )
-            )
-        )
-
         self.logger_ = Logger(_run, self, self.configuration_dict, self.objects)
 
         self.epoch = -1
         self.step = 0
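The removed block above, like the complement iterator the experiment keeps, relies on `get_infinite_data_loader`, whose implementation is not part of this diff. A common shape for such a helper is an endless generator over the finite `DataLoader`; the following is a sketch under that assumption, not necessarily the project's exact code:

``` python
def get_infinite_data_loader(data_loader):
    # Cycle the finite DataLoader forever so next() never raises StopIteration;
    # with shuffle=True every pass over the data is reshuffled.
    while True:
        for batch in data_loader:
            yield batch
```

Wrapped this way, `next(self.inf_complement_training_iterator)` in `training_step` always yields a fresh batch without any epoch bookkeeping.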
@@ -87,14 +75,7 @@ class ClassifiactionExperiment(pl.LightningModule, BaseExperiment):
         if optimizer_idx == 0:
             batch_normal = self(batch_normal)
-            #batch_abnormal_0 = next(self.inf_training_iterator)
-            batch_abnormal_1 = next(self.inf_complement_training_iterator)
-            #lambdas = np.random.beta(1, 1, size=(self.objects['batch_size'], 1, 1, 1)).astype(np.float32)
-            #lambdas = torch.from_numpy(np.where(lambdas > 0.5, lambdas, 1.0-lambdas)).to(batch_normal['observations'].device)
-            #batch_abnormal_1['observations'] = (-lambdas + 1.0) * batch_abnormal_0['observations'] + lambdas * batch_abnormal_1['observations']
-            batch_abnormal = self(batch_abnormal_1)
+            batch_abnormal = self(next(self.inf_complement_training_iterator))
 
             reconstruction_loss = self.reconstruction.loss(batch_normal, batch_abnormal)
......
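The commented-out lines document a batch-level mixup between two complement batches: per-sample weights are drawn from Beta(1, 1) and folded into [0.5, 1.0] so the second batch always keeps the larger share. Written out as a small helper (a sketch reconstructed from those comments; `mix_batches` is not a function in this repository):

``` python
import numpy as np
import torch

def mix_batches(obs_a, obs_b):
    # obs_a, obs_b: tensors of shape (batch, channels, freq_bins, time_frames)
    batch_size = obs_a.shape[0]
    # Beta(1, 1) is uniform on [0, 1]; folding values below 0.5 keeps obs_b dominant.
    lambdas = np.random.beta(1, 1, size=(batch_size, 1, 1, 1)).astype(np.float32)
    lambdas = torch.from_numpy(np.where(lambdas > 0.5, lambdas, 1.0 - lambdas)).to(obs_a.device)
    return (1.0 - lambdas) * obs_a + lambdas * obs_b
```

In this commit that path stays disabled and the complement batch is forwarded to the model unmixed.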
%% Cell type:code id: tags:
``` python
import dcase2020_task2
from pymongo import MongoClient
import numpy as np
INVERSE_CLASS_MAP = {
0: 'fan',
1: 'pump',
2: 'slider',
3: 'ToyCar',
4: 'ToyConveyor',
5: 'valve'
}
baseline_auc = {
0: {0: 0.5441, 2: 0.7340, 4: 0.6161, 6: 0.7392},
1: {0: 0.6715, 2: 0.6153, 4: 0.8833, 6: 0.7455},
2: {0: 0.9619, 2: 0.7897, 4: 0.9430, 6: 0.6959},
3: {1: 0.8136, 2: 0.8597, 3: 0.6330, 4: 0.8445},
4: {1: 0.7807, 2: 0.6416, 3: 0.7535},
5: {0: 0.6876, 2: 0.6818, 4: 0.7430, 6: 0.5390}
}
baseline_pauc = {
0: {0: 0.4937, 2: 0.5481, 4: 0.5326, 6: 0.5235},
1: {0: 0.5674, 2: 0.5810, 4: 0.6710, 6: 0.5802},
2: {0: 0.8144, 2: 0.6368, 4: 0.7198, 6: 0.4902},
3: {1: 0.6840, 2: 0.7772, 3: 0.5521, 4: 0.6897},
4: {1: 0.6425, 2: 0.5601, 3: 0.6103},
5: {0: 0.5170, 2: 0.5183, 4: 0.5197, 6: 0.4843}
}
```
%% Cell type:code id: tags:
``` python
client = MongoClient('mongodb://student2.cp.jku.at:27017/')
db = client.dcase2020_task2
db.list_collection_names()
```
%%%% Output: execute_result
['fs.chunks',
'omniboard.settings',
'omniboard.metric.columns',
'omniboard.custom.columns',
'fs.files',
'runs',
'metrics']
%% Cell type:code id: tags:
``` python
experiments = []
for experiment in db.runs.find(
{
"experiment.name": "dcase2020_task2_baseline"
}
):
if (experiment['config'].get('normalize', True)) and (experiment['config'].get('normalize_raw', True)):
experiments.append([
experiment['config']['machine_type'],
experiment['config']['machine_id'],
experiment['result']['auroc_mean'],
experiment['result']['pauroc_mean'],
])
print("{} experiments loaded".format(len(experiments)))
assert len(experiments) == 23
```
%%%% Output: stream
23 experiments loaded
%% Cell type:code id: tags:
``` python
print("| Machine | Type | ID | AUC | to BL | AUC | to BL |")
print("| ------- | :--- | :--- | ---------- | ---------- | ---------- | ---------- |")
for e in experiments:
print("|{}\t| {}\t|{}\t|{:0.4f}\t|{:+0.4f}\t|{:0.4f}\t|{:+1.4f}\t|".format(
INVERSE_CLASS_MAP[e[0]],
e[0],
e[1],
e[2],
e[2] - baseline_auc[e[0]][e[1]],
e[3],
e[3] - baseline_pauc[e[0]][e[1]]
))
```
%%%% Output: stream
| Machine | Type | ID | AUC | to BL | pAUC | to BL |
| ------- | :--- | :--- | ---------- | ---------- | ---------- | ---------- |
|fan | 0 |0 |0.5639 |+0.0198 |0.4959 |+0.0022 |
|fan | 0 |2 |0.8054 |+0.0714 |0.6094 |+0.0613 |
|fan | 0 |4 |0.6661 |+0.0500 |0.5413 |+0.0087 |
|fan | 0 |6 |0.9022 |+0.1630 |0.7100 |+0.1865 |
|pump | 1 |0 |0.6967 |+0.0252 |0.5396 |-0.0278 |
|pump | 1 |2 |0.6124 |-0.0029 |0.5771 |-0.0039 |
|pump | 1 |4 |0.9497 |+0.0664 |0.7932 |+0.1222 |
|pump | 1 |6 |0.8019 |+0.0564 |0.6068 |+0.0266 |
|slider | 2 |0 |0.9342 |-0.0277 |0.6991 |-0.1153 |
|slider | 2 |2 |0.7753 |-0.0144 |0.6069 |-0.0299 |
|slider | 2 |4 |0.9044 |-0.0386 |0.6257 |-0.0941 |
|slider | 2 |6 |0.6628 |-0.0331 |0.4979 |+0.0077 |
|ToyCar | 3 |1 |0.7976 |-0.0160 |0.6980 |+0.0140 |
|ToyCar | 3 |2 |0.8678 |+0.0081 |0.7776 |+0.0004 |
|ToyCar | 3 |3 |0.6652 |+0.0322 |0.5633 |+0.0112 |
|ToyCar | 3 |4 |0.8859 |+0.0414 |0.7442 |+0.0545 |
|ToyConveyor | 4 |1 |0.7552 |-0.0255 |0.6257 |-0.0168 |
|ToyConveyor | 4 |2 |0.6276 |-0.0140 |0.5590 |-0.0011 |
|ToyConveyor | 4 |3 |0.7387 |-0.0148 |0.5950 |-0.0153 |
|valve | 5 |0 |0.6751 |-0.0125 |0.5179 |+0.0009 |
|valve | 5 |2 |0.6286 |-0.0532 |0.5105 |-0.0078 |
|valve | 5 |4 |0.7339 |-0.0091 |0.5263 |+0.0066 |
|valve | 5 |6 |0.5888 |+0.0498 |0.4947 |+0.0104 |
%% Cell type:code id: tags:
``` python
```