Commit 83c22ad4 authored by Paul Primus

add microseconds to experiment name

parent a0224285
@@ -247,3 +247,31 @@
|valve | 5 |4 |0.6526 |-0.0904 |0.5092 |-0.0105 |
|valve | 5 |6 |0.8994 |+0.3604 |0.7158 |+0.2315 |
## raw normalized, spec normalized per mic, complement same mic
23 experiments loaded
| Machine | Type | ID | AUC | Δ to BL | pAUC | Δ to BL |
| ------- | :--- | :--- | ---------- | ---------- | ---------- | ---------- |
|fan | 0 |0 |0.6703 |+0.1262 |0.5874 |+0.0937 |
|fan | 0 |2 |0.9831 |+0.2491 |0.9264 |+0.3783 |
|fan | 0 |4 |0.7925 |+0.1764 |0.6645 |+0.1319 |
|fan | 0 |6 |0.7467 |+0.0075 |0.8455 |+0.3220 |
|pump | 1 |0 |0.8555 |+0.1840 |0.7022 |+0.1348 |
|pump | 1 |2 |0.5559 |-0.0594 |0.5386 |-0.0424 |
|pump | 1 |4 |0.9995 |+0.1162 |0.9974 |+0.3264 |
|pump | 1 |6 |0.9425 |+0.1970 |0.8369 |+0.2567 |
|slider | 2 |0 |0.9773 |+0.0154 |0.9117 |+0.0973 |
|slider | 2 |2 |0.8856 |+0.0959 |0.6542 |+0.0174 |
|slider | 2 |4 |0.9857 |+0.0427 |0.9249 |+0.2051 |
|slider | 2 |6 |0.8374 |+0.1415 |0.5571 |+0.0669 |
|ToyCar | 3 |1 |0.6835 |-0.1301 |0.6569 |-0.0271 |
|ToyCar | 3 |2 |0.8717 |+0.0120 |0.7941 |+0.0169 |
|ToyCar | 3 |3 |0.9855 |+0.3525 |0.9338 |+0.3817 |
|ToyCar | 3 |4 |0.9983 |+0.1538 |0.9913 |+0.3016 |
|ToyConveyor | 4 |1 |0.8592 |+0.0785 |0.7649 |+0.1224 |
|ToyConveyor | 4 |2 |0.5898 |-0.0518 |0.5401 |-0.0200 |
|ToyConveyor | 4 |3 |0.7115 |-0.0420 |0.5929 |-0.0174 |
|valve | 5 |0 |0.9913 |+0.3037 |0.9655 |+0.4485 |
|valve | 5 |2 |0.7976 |+0.1158 |0.5561 |+0.0378 |
|valve | 5 |4 |0.7020 |-0.0410 |0.5373 |+0.0176 |
|valve | 5 |6 |0.7919 |+0.2529 |0.5500 |+0.0657 |
\ No newline at end of file
@@ -62,7 +62,7 @@ def configuration():
}
reconstruction = {
'class': 'reconstructions.MSE',
'class': 'losses.MSE',
'kwargs': {
'weight': 1.0,
'input_shape': '@data_set.observation_shape'
......
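For reference, the `reconstruction` block above follows the repository's dotted-path configuration style: a `class` string plus `kwargs`, with `@`-references (such as `@data_set.observation_shape`) resolved elsewhere. Below is a minimal sketch of how such a dict could be instantiated, assuming a hypothetical `build_from_config` helper; the project's actual resolution logic is not shown in this diff and may differ.

``` python
import importlib

def build_from_config(config, **resolved):
    """Instantiate a 'class'/'kwargs' config dict (hypothetical helper).

    Dotted paths such as 'losses.MSE' are imported dynamically; values that
    were '@'-references are assumed to be passed in already resolved.
    """
    module_name, class_name = config['class'].rsplit('.', 1)
    cls = getattr(importlib.import_module(module_name), class_name)
    kwargs = {**config.get('kwargs', {}), **resolved}
    return cls(**kwargs)

# Illustrative usage (shapes assumed, requires the repository's losses package):
# reconstruction = build_from_config(
#     {'class': 'losses.MSE', 'kwargs': {'weight': 1.0}},
#     input_shape=(1, 128, 5),
# )
```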
@@ -16,7 +16,7 @@ def configuration():
descriptor = None
machine_type = 3
machine_id = 1
machine_id = 3
batch_size = 512
@@ -29,11 +29,11 @@ def configuration():
rho = 0.1
feature_context = 'short'
reconstruction_class = 'reconstructions.AUC'
reconstruction_class = 'losses.BCE'
mse_weight = 0.0
model_class = 'models.BaselineFCNN'
normalize = 'per_mic'
normalize = 'all'
normalize_raw = True
complement = 'same_mic'
......
@@ -29,7 +29,7 @@ def configuration():
rho = 0.1
feature_context = 'short'
reconstruction_class = 'reconstructions.AUC'
reconstruction_class = 'losses.AUC'
mse_weight = 0.0
model_class = 'models.SamplingFCAE'
......
@@ -69,7 +69,7 @@ class BaselineExperiment(pl.LightningModule, BaseExperiment):
batch_normal['reconstruction_loss'] = reconstruction_loss
batch_normal['prior_loss'] = prior_loss
batch_normal['loss'] = reconstruction_loss + prior_loss
batch_normal['losses'] = reconstruction_loss + prior_loss
self.logger_.log_training_step(batch_normal, self.step)
self.step += 1
@@ -77,8 +77,8 @@ class BaselineExperiment(pl.LightningModule, BaseExperiment):
raise AttributeError
return {
'loss': batch_normal['loss'],
'tqdm': {'loss': batch_normal['loss']},
'losses': batch_normal['losses'],
'tqdm': {'losses': batch_normal['losses']},
}
def validation_step(self, batch, batch_num):
......
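For context on the returned dictionary: PyTorch Lightning drives its backward pass from the `'loss'` entry of whatever `training_step` returns (or from a bare loss tensor), and treats additional keys as payload for logging and progress reporting. A minimal, self-contained sketch of that convention, independent of `BaselineExperiment` (layer sizes here are illustrative):

``` python
import torch
import pytorch_lightning as pl

class MinimalExperiment(pl.LightningModule):
    """Sketch of the training_step return convention, not the repo's class."""

    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(8, 1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        loss = torch.nn.functional.mse_loss(self.layer(x), y)
        # Lightning reads the 'loss' entry of the returned dict (or a bare
        # tensor) to run the backward pass; extra keys are only logged.
        return {'loss': loss}

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=1e-3)
```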
@@ -92,7 +92,7 @@ class SamplingExperiment(pl.LightningModule, BaseExperiment):
batch_normal['reconstruction_loss'] = reconstruction_loss
batch_normal['prior_loss'] = prior_loss
batch_normal['loss'] = reconstruction_loss + prior_loss
batch_normal['losses'] = reconstruction_loss + prior_loss
self.logger_.log_training_step(batch_normal, self.step)
self.step += 1
@@ -100,8 +100,8 @@ class SamplingExperiment(pl.LightningModule, BaseExperiment):
raise AttributeError
return {
'loss': batch_normal['loss'],
'tqdm': {'loss': batch_normal['loss']},
'losses': batch_normal['losses'],
'tqdm': {'losses': batch_normal['losses']},
}
def validation_step(self, batch, batch_num):
......
from losses.reconstruction_base import ReconstructionBase
from losses.mse_loss import MSE
from losses.np_loss import NP
from losses.auc_loss import AUC
from losses.bce_loss import BCE
from reconstructions import ReconstructionBase
from losses import ReconstructionBase
import torch
import torch.nn.functional as F
@@ -16,20 +16,23 @@ class AUC(ReconstructionBase):
batch_normal['normal_scores'] = normal_scores.mean()
batch_normal['abnormal_scores'] = abnormal_scores.mean()
if batch_normal.get('reconstructions'):
batch_normal['mse_normal'] = F.mse_loss(batch_normal['reconstructions'], batch_normal['observations'])
batch_normal['mse_abnormal'] = F.mse_loss(batch_abnormal['reconstructions'], batch_abnormal['observations']) # batch_abnormal['observations'])
if batch_normal.get('losses'):
batch_normal['mse_normal'] = F.mse_loss(batch_normal['losses'], batch_normal['observations'])
batch_normal['mse_abnormal'] = F.mse_loss(batch_abnormal['losses'], batch_abnormal['observations']) # batch_abnormal['observations'])
tprs = torch.sigmoid((abnormal_scores[:, None] - normal_scores[None, :])).mean(dim=0)
tprs = torch.sigmoid(abnormal_scores[:, None] - normal_scores[None, :]).mean(dim=0)
batch_normal['tpr'] = tprs.mean()
batch_normal['fpr'] = 0.5
batch_normal['reconstruction_loss'] = self.weight * -batch_normal['tpr']
a = torch.nn.functional.binary_cross_entropy_with_logits(abnormal_scores, torch.ones_like(abnormal_scores).to(abnormal_scores.device))
b = torch.nn.functional.binary_cross_entropy_with_logits(normal_scores, torch.zeros_like(normal_scores).to(normal_scores.device))
batch_normal['reconstruction_loss'] = self.weight * (-batch_normal['tpr'] + a + b)
return batch_normal['reconstruction_loss']
def forward(self, batch):
batch['visualizations'] = batch['pre_reconstructions']
batch['reconstructions'] = batch['pre_reconstructions']
batch['scores'] = (batch['reconstructions'] - batch['observations']).pow(2).mean(axis=(1, 2, 3))
batch['losses'] = batch['pre_reconstructions']
batch['scores'] = (batch['losses'] - batch['observations']).pow(2).mean(axis=(1, 2, 3))
return batch
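The reworked `losses.AUC` objective above is a differentiable AUC surrogate: every abnormal score is compared with every normal score, the pairwise differences are squashed through a sigmoid, and their mean acts as a soft true-positive rate; the two `binary_cross_entropy_with_logits` terms additionally push abnormal scores toward 1 and normal scores toward 0. A standalone sketch of the same computation on made-up score tensors (values are illustrative only):

``` python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
normal_scores = torch.randn(16)        # scores of normal examples
abnormal_scores = torch.randn(16) + 1  # scores of abnormal examples

# Soft TPR: sigmoid of all pairwise (abnormal - normal) score differences.
pairwise = abnormal_scores[:, None] - normal_scores[None, :]
tpr = torch.sigmoid(pairwise).mean()

# BCE-with-logits terms pulling abnormal scores toward 1, normal toward 0.
a = F.binary_cross_entropy_with_logits(abnormal_scores, torch.ones_like(abnormal_scores))
b = F.binary_cross_entropy_with_logits(normal_scores, torch.zeros_like(normal_scores))

weight = 1.0
reconstruction_loss = weight * (-tpr + a + b)
print(float(tpr), float(reconstruction_loss))
```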
from losses import ReconstructionBase
import torch
import torch.nn.functional as F
class BCE(ReconstructionBase):
def __init__(self, weight=1.0, rho=0.2, **kwargs):
super().__init__(weight=weight)
self.rho = rho
def loss(self, batch_normal, batch_abnormal, *args, **kwargs):
normal_scores = batch_normal['scores']
abnormal_scores = batch_abnormal['scores']
batch_normal['normal_scores'] = normal_scores.mean()
batch_normal['abnormal_scores'] = abnormal_scores.mean()
abnormal_loss = torch.nn.functional.binary_cross_entropy_with_logits(abnormal_scores, torch.ones_like(abnormal_scores).to(abnormal_scores.device))
normal_loss = torch.nn.functional.binary_cross_entropy_with_logits(normal_scores, torch.zeros_like(normal_scores).to(normal_scores.device))
batch_normal['reconstruction_loss'] = self.weight * (abnormal_loss + normal_loss)
return batch_normal['reconstruction_loss']
def forward(self, batch):
batch['visualizations'] = batch['pre_reconstructions']
batch['losses'] = batch['pre_reconstructions']
batch['scores'] = (batch['losses'] - batch['observations']).pow(2).mean(axis=(1, 2, 3))
return batch
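The new `losses.BCE` drops the ranking term entirely and treats the per-example anomaly scores as logits of an abnormal-vs-normal classifier: targets are 1 for abnormal and 0 for normal examples, with the sigmoid handled inside `binary_cross_entropy_with_logits`. A tiny sketch on made-up scores:

``` python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
normal_scores = torch.randn(8)        # anomaly scores of normal examples
abnormal_scores = torch.randn(8) + 1  # anomaly scores of abnormal examples

# Scores act as logits; targets are 0 for normal, 1 for abnormal examples.
normal_loss = F.binary_cross_entropy_with_logits(normal_scores, torch.zeros_like(normal_scores))
abnormal_loss = F.binary_cross_entropy_with_logits(abnormal_scores, torch.ones_like(abnormal_scores))

weight = 1.0
reconstruction_loss = weight * (abnormal_loss + normal_loss)
print(float(reconstruction_loss))
```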
from reconstructions import ReconstructionBase
from losses import ReconstructionBase
import torch.nn.functional as F
@@ -10,13 +10,13 @@ class MSE(ReconstructionBase):
self.p = p
def loss(self, batch, *args, **kwargs):
bce = F.mse_loss(batch['reconstructions'], batch['observations'], reduction='mean')
bce = F.mse_loss(batch['losses'], batch['observations'], reduction='mean')
batch['reconstruction_loss'] = self.weight * bce # / (batch['observations'].shape[0])
return batch['reconstruction_loss']
def forward(self, batch):
batch['visualizations'] = batch['pre_reconstructions']
batch['reconstructions'] = batch['pre_reconstructions']
batch['scores'] = (batch['reconstructions'] - batch['observations']).pow(2).mean(axis=(1, 2, 3))
batch['losses'] = batch['pre_reconstructions']
batch['scores'] = (batch['losses'] - batch['observations']).pow(2).mean(axis=(1, 2, 3))
return batch
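`losses.MSE` uses two related quantities: a single scalar training loss (mean squared error over the whole batch) and per-example anomaly scores for evaluation (squared error reduced over every axis except the batch axis). A short sketch of the distinction, with assumed `(1, 128, 5)` observation shapes:

``` python
import torch
import torch.nn.functional as F

torch.manual_seed(0)
observations = torch.rand(4, 1, 128, 5)      # assumed observation shape
reconstructions = torch.rand(4, 1, 128, 5)

# Scalar training loss: mean squared error over the entire batch.
training_loss = F.mse_loss(reconstructions, observations, reduction='mean')

# Per-example anomaly scores: reduce over all axes except the batch axis.
scores = (reconstructions - observations).pow(2).mean(dim=(1, 2, 3))

print(float(training_loss), scores.shape)  # scalar vs. shape (4,)
```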
from reconstructions import ReconstructionBase
from losses import ReconstructionBase
import torch
@@ -21,6 +21,6 @@ class NP(ReconstructionBase):
def forward(self, batch):
batch['visualizations'] = batch['pre_reconstructions']
batch['reconstructions'] = batch['pre_reconstructions']
batch['scores'] = (batch['reconstructions'] - batch['observations']).pow(2).sum(axis=(1, 2, 3))
batch['losses'] = batch['pre_reconstructions']
batch['scores'] = (batch['losses'] - batch['observations']).pow(2).sum(axis=(1, 2, 3))
return batch
@@ -71,7 +71,7 @@ class BaselineCNN(torch.nn.Module, VAEBase):
'''
from priors.no_prior import NoPrior
from reconstructions.mse_loss import MSE
from losses.mse_loss import MSE
import torch
input_shape = (1, 128, 5)
......
@@ -79,6 +79,13 @@ class BaselineFCNN(torch.nn.Module):
torch.nn.Linear(64, 1)
)
self.apply(self.init_weights)
def init_weights(self, m):
if type(m) == torch.nn.Linear:
torch.nn.init.xavier_uniform(m.weight)
m.bias.data.fill_(0.01)
def forward(self, batch):
x = batch['observations']
x = x.view(x.shape[0], -1)
......
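The added `init_weights` hook is applied through `torch.nn.Module.apply`, which visits every submodule and calls the function on each, so only `Linear` layers are re-initialized here. A standalone sketch of the same pattern (written with the in-place `xavier_uniform_`, the non-deprecated spelling of the initializer used above):

``` python
import torch

def init_weights(m):
    # Xavier-initialize weights and give biases a small positive value,
    # but only for Linear layers; other module types are left untouched.
    if isinstance(m, torch.nn.Linear):
        torch.nn.init.xavier_uniform_(m.weight)
        m.bias.data.fill_(0.01)

model = torch.nn.Sequential(
    torch.nn.Linear(128 * 5, 64),
    torch.nn.ReLU(),
    torch.nn.Linear(64, 1),
)
model.apply(init_weights)  # apply() recurses over all submodules
```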
from reconstructions.reconstruction_base import ReconstructionBase
from reconstructions.mse_loss import MSE
from reconstructions.np_loss import NP
from reconstructions.auc_loss import AUC
@@ -29,7 +29,7 @@ class Logger:
def log_training_step(self, batch, step):
if step % 100 == 0:
self.__log_metric__('training_loss', batch.get('loss'), step)
self.__log_metric__('training_loss', batch.get('losses'), step)
self.__log_metric__('training_prior_loss', batch.get('prior_loss'), step)
self.__log_metric__('training_reconstruction_loss', batch.get('reconstruction_loss'), step)
self.__log_metric__('tpr', batch.get('tpr'), step)
@@ -41,7 +41,7 @@ class Logger:
def log_generator_step(self, batch, step):
if step % 100 == 0:
self.__log_metric__('generator_loss', batch.get('loss'), step)
self.__log_metric__('generator_loss', batch.get('losses'), step)
self.__log_metric__('generator_loss', batch.get('prior_loss'), step)
self.__log_metric__('generator_loss', batch.get('reconstruction_loss'), step)
......
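The logger only records every 100th step and fetches values with `batch.get(...)`, so metrics that are absent from the batch arrive as `None`. A hypothetical sketch of a `__log_metric__` that tolerates this; the repository's real implementation is not part of this diff and presumably forwards to sacred/omniboard rather than printing:

``` python
class MinimalLogger:
    """Hypothetical stand-in for the repository's Logger (sketch only)."""

    def __log_metric__(self, name, value, step):
        # Skip metrics that were not present in the batch dictionary.
        if value is None:
            return
        # A real implementation would forward to an experiment tracker;
        # here we just print the value.
        print(f'step {step}: {name} = {float(value):.4f}')

    def log_training_step(self, batch, step):
        if step % 100 == 0:
            self.__log_metric__('training_loss', batch.get('losses'), step)
            self.__log_metric__('training_prior_loss', batch.get('prior_loss'), step)

MinimalLogger().log_training_step({'losses': 0.5}, step=100)  # logs training_loss only
```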
%% Cell type:code id: tags:
``` python
import dcase2020_task2
from pymongo import MongoClient
import numpy as np
INVERSE_CLASS_MAP = {
0: 'fan',
1: 'pump',
2: 'slider',
3: 'ToyCar',
4: 'ToyConveyor',
5: 'valve'
}
baseline_auc = {
0: {0: 0.5441, 2: 0.7340, 4: 0.6161, 6: 0.7392},
1: {0: 0.6715, 2: 0.6153, 4: 0.8833, 6: 0.7455},
2: {0: 0.9619, 2: 0.7897, 4: 0.9430, 6: 0.6959},
3: {1: 0.8136, 2: 0.8597, 3: 0.6330, 4: 0.8445},
4: {1: 0.7807, 2: 0.6416, 3: 0.7535},
5: {0: 0.6876, 2: 0.6818, 4: 0.7430, 6: 0.5390}
}
baseline_pauc = {
0: {0: 0.4937, 2: 0.5481, 4: 0.5326, 6: 0.5235},
1: {0: 0.5674, 2: 0.5810, 4: 0.6710, 6: 0.5802},
2: {0: 0.8144, 2: 0.6368, 4: 0.7198, 6: 0.4902},
3: {1: 0.6840, 2: 0.7772, 3: 0.5521, 4: 0.6897},
4: {1: 0.6425, 2: 0.5601, 3: 0.6103},
5: {0: 0.5170, 2: 0.5183, 4: 0.5197, 6: 0.4843}
}
```
%% Cell type:code id: tags:
``` python
client = MongoClient('mongodb://student2.cp.jku.at:27017/')
db = client.dcase2020_task2
db.list_collection_names()
```
%%%% Output: execute_result
['fs.chunks',
'omniboard.settings',
'omniboard.metric.columns',
'omniboard.custom.columns',
'fs.files',
'runs',
'metrics']
%% Cell type:code id: tags:
``` python
experiments = []
for experiment in db.runs.find(
{
"experiment.name": "dcase2020_task2_classification"
}
):
if experiment['config'].get('descriptor') == 'not_normalized':
experiments.append([
experiment['config']['machine_type'],
experiment['config']['machine_id'],
experiment['result']['auroc_mean'] if experiment['result'] else 0,
experiment['result']['pauroc_mean'] if experiment['result'] else 0,
])
print("{} experiments loaded".format(len(experiments)))
# assert len(experiments) == 23
print("| Machine | Type | ID | AUC | to BL | AUC | to BL |")
print("| ------- | :--- | :--- | ---------- | ---------- | ---------- | ---------- |")
for e in experiments:
print("|{}\t| {}\t|{}\t|{:0.4f}\t|{:+0.4f}\t|{:0.4f}\t|{:+1.4f}\t|".format(
INVERSE_CLASS_MAP[e[0]],
e[0],
e[1],
e[2],
e[2] - baseline_auc[e[0]][e[1]],
e[3],
e[3] - baseline_pauc[e[0]][e[1]]
))
```
%%%% Output: stream
20 experiments loaded
| Machine | Type | ID | AUC | to BL | AUC | to BL |
| ------- | :--- | :--- | ---------- | ---------- | ---------- | ---------- |
|fan | 0 |0 |0.5857 |+0.0416 |0.5846 |+0.0909 |
|fan | 0 |2 |0.9865 |+0.2525 |0.9315 |+0.3834 |
|fan | 0 |4 |0.7753 |+0.1592 |0.6568 |+0.1242 |
|fan | 0 |6 |0.7520 |+0.0128 |0.8455 |+0.3220 |
|pump | 1 |0 |0.8373 |+0.1658 |0.7126 |+0.1452 |
|pump | 1 |2 |0.5577 |-0.0576 |0.5225 |-0.0585 |
|pump | 1 |4 |0.9995 |+0.1162 |0.9974 |+0.3264 |
|pump | 1 |6 |0.9311 |+0.1856 |0.8354 |+0.2552 |
|slider | 2 |0 |0.9946 |+0.0327 |0.9821 |+0.1677 |
|slider | 2 |2 |0.7940 |+0.0043 |0.5494 |-0.0874 |
|slider | 2 |4 |0.9953 |+0.0523 |0.9752 |+0.2554 |
|slider | 2 |6 |0.8202 |+0.1243 |0.5795 |+0.0893 |
|ToyCar | 3 |1 |0.6782 |-0.1354 |0.6485 |-0.0355 |
|ToyCar | 3 |2 |0.8749 |+0.0152 |0.7847 |+0.0075 |
|ToyCar | 3 |3 |0.9746 |+0.3416 |0.9001 |+0.3480 |
|ToyCar | 3 |4 |0.0000 |-0.8445 |0.0000 |-0.6897 |
|ToyCar | 3 |4 |0.9975 |+0.1530 |0.9867 |+0.2970 |
|ToyConveyor | 4 |1 |0.8521 |+0.0714 |0.7509 |+0.1084 |
|ToyConveyor | 4 |2 |0.5919 |-0.0497 |0.5407 |-0.0194 |
|ToyConveyor | 4 |3 |0.0000 |-0.7535 |0.0000 |-0.6103 |
%% Cell type:code id: tags:
``` python
experiments = []
for experiment in db.runs.find(
{
"experiment.name": "dcase2020_task2_classification"
}
):
if experiment['config'].get('descriptor') == 'per_mic_normalized': #'not_normalized':
experiments.append([
experiment['config']['machine_type'],
experiment['config']['machine_id'],
experiment['result']['auroc_max'] if experiment['result'] else 0,
experiment['result']['pauroc_max'] if experiment['result'] else 0,
])
print("{} experiments loaded".format(len(experiments)))
# assert len(experiments) == 23
print("| Machine | Type | ID | AUC | to BL | AUC | to BL |")
print("| ------- | :--- | :--- | ---------- | ---------- | ---------- | ---------- |")
for e in experiments:
print("|{}\t| {}\t|{}\t|{:0.4f}\t|{:+0.4f}\t|{:0.4f}\t|{:+1.4f}\t|".format(
INVERSE_CLASS_MAP[e[0]],
e[0],
e[1],
e[2],
e[2] - baseline_auc[e[0]][e[1]],
e[3],
e[3] - baseline_pauc[e[0]][e[1]]
))
```
%%%% Output: stream
22 experiments loaded
| Machine | Type | ID | AUC | to BL | AUC | to BL |
| ------- | :--- | :--- | ---------- | ---------- | ---------- | ---------- |
|fan | 0 |0 |0.6031 |+0.0590 |0.5327 |+0.0390 |
|fan | 0 |2 |0.9679 |+0.2339 |0.8651 |+0.3170 |
|fan | 0 |4 |0.7782 |+0.1621 |0.6996 |+0.1670 |
|fan | 0 |6 |0.7735 |+0.0343 |0.8456 |+0.3221 |
|pump | 1 |0 |0.8815 |+0.2100 |0.7755 |+0.2081 |
|pump | 1 |2 |0.5438 |-0.0715 |0.5268 |-0.0542 |
|pump | 1 |4 |0.9999 |+0.1166 |0.9995 |+0.3285 |
|pump | 1 |6 |0.9278 |+0.1823 |0.7038 |+0.1236 |
|slider | 2 |0 |0.9807 |+0.0188 |0.9253 |+0.1109 |
|slider | 2 |2 |0.8460 |+0.0563 |0.6611 |+0.0243 |
|slider | 2 |4 |0.9522 |+0.0092 |0.8217 |+0.1019 |
|slider | 2 |6 |0.8180 |+0.1221 |0.5742 |+0.0840 |
|ToyCar | 3 |1 |0.6264 |-0.1872 |0.6610 |-0.0230 |
|ToyCar | 3 |2 |0.7937 |-0.0660 |0.6393 |-0.1379 |
|ToyCar | 3 |3 |0.8065 |+0.1735 |0.7319 |+0.1798 |
|ToyCar | 3 |4 |0.9409 |+0.0964 |0.8307 |+0.1410 |
|ToyConveyor | 4 |1 |0.8453 |+0.0646 |0.7436 |+0.1011 |
|ToyConveyor | 4 |2 |0.5959 |-0.0457 |0.5218 |-0.0383 |
|ToyConveyor | 4 |3 |0.6888 |-0.0647 |0.5803 |-0.0300 |
|valve | 5 |0 |0.9460 |+0.2584 |0.7771 |+0.2601 |
|valve | 5 |2 |0.9258 |+0.2440 |0.7482 |+0.2299 |
|valve | 5 |4 |0.0000 |-0.7430 |0.0000 |-0.5197 |
%% Cell type:code id: tags:
``` python
```
%% Cell type:code id: tags:
``` python
import dcase2020_task2
from pymongo import MongoClient
import numpy as np
INVERSE_CLASS_MAP = {
0: 'fan',
1: 'pump',
2: 'slider',
3: 'ToyCar',
4: 'ToyConveyor',
5: 'valve'
}
baseline_auc = {
0: {0: 0.5441, 2: 0.7340, 4: 0.6161, 6: 0.7392},
1: {0: 0.6715, 2: 0.6153, 4: 0.8833, 6: 0.7455},
2: {0: 0.9619, 2: 0.7897, 4: 0.9430, 6: 0.6959},
3: {1: 0.8136, 2: 0.8597, 3: 0.6330, 4: 0.8445},
4: {1: 0.7807, 2: 0.6416, 3: 0.7535},
5: {0: 0.6876, 2: 0.6818, 4: 0.7430, 6: 0.5390}
}
baseline_pauc = {
0: {0: 0.4937, 2: 0.5481, 4: 0.5326, 6: 0.5235},
1: {0: 0.5674, 2: 0.5810, 4: 0.6710, 6: 0.5802},
2: {0: 0.8144, 2: 0.6368, 4: 0.7198, 6: 0.4902},
3: {1: 0.6840, 2: 0.7772, 3: 0.5521, 4: 0.6897},
4: {1: 0.6425, 2: 0.5601, 3: 0.6103},
5: {0: 0.5170, 2: 0.5183, 4: 0.5197, 6: 0.4843}
}
```
%% Cell type:code id: tags:
``` python
client = MongoClient('mongodb://student2.cp.jku.at:27017/')
db = client.dcase2020_task2
db.list_collection_names()
```
%% Cell type:code id: tags:
``` python
experiments = []
for experiment in db.runs.find(
{
"experiment.name": "dcase2020_task2_classification"
}
):
if experiment['config'].get('descriptor') == 'not_normalized':
experiments.append([
experiment['config']['machine_type'],
experiment['config']['machine_id'],
experiment['result']['auroc_mean'] if experiment['result'] else 0,
experiment['result']['pauroc_mean'] if experiment['result'] else 0,
])
print("{} experiments loaded".format(len(experiments)))
# assert len(experiments) == 23
print("| Machine | Type | ID | AUC | to BL | AUC | to BL |")
print("| ------- | :--- | :--- | ---------- | ---------- | ---------- | ---------- |")
for e in experiments:
print("|{}\t| {}\t|{}\t|{:0.4f}\t|{:+0.4f}\t|{:0.4f}\t|{:+1.4f}\t|".format(
INVERSE_CLASS_MAP[e[0]],
e[0],
e[1],
e[2],
e[2] - baseline_auc[e[0]][e[1]],
e[3],
e[3] - baseline_pauc[e[0]][e[1]]
))
```
%% Cell type:code id: tags:
``` python
experiments = []
for experiment in db.runs.find(
{
"experiment.name": "dcase2020_task2_classification"
}
):
if experiment['config'].get('descriptor') == 'per_mic_normalized': #'not_normalized':
experiments.append([
experiment['config']['machine_type'],
experiment['config']['machine_id'],
experiment['result']['auroc_mean'] if experiment['result'] else 0,
experiment['result']['pauroc_mean'] if experiment['result'] else 0,
])
print("{} experiments loaded".format(len(experiments)))
# assert len(experiments) == 23
print("| Machine | Type | ID | AUC | to BL | AUC | to BL |")
print("| ------- | :--- | :--- | ---------- | ---------- | ---------- | ---------- |")
for e in experiments:
print("|{}\t| {}\t|{}\t|{:0.4f}\t|{:+0.4f}\t|{:0.4f}\t|{:+1.4f}\t|".format(
INVERSE_CLASS_MAP[e[0]],
e[0],
e[1],
e[2],
e[2] - baseline_auc[e[0]][e[1]],
e[3],
e[3] - baseline_pauc[e[0]][e[1]]
))
```
%%%% Output: stream
22 experiments loaded
| Machine | Type | ID | AUC | to BL | AUC | to BL |
| ------- | :--- | :--- | ---------- | ---------- | ---------- | ---------- |
|fan | 0 |0 |0.6683 |+0.1242 |0.6060 |+0.1123 |
|fan | 0 |2 |0.9883 |+0.2543 |0.9456 |+0.3975 |
|fan | 0 |4 |0.7859 |+0.1698 |0.6636 |+0.1310 |
|fan | 0 |6 |0.7462 |+0.0070 |0.8455 |+0.3220 |
|pump | 1 |0 |0.8785 |+0.2070 |0.7284 |+0.1610 |
|pump | 1 |2 |0.5635 |-0.0518 |0.5462 |-0.0348 |
|pump | 1 |4 |0.9995 |+0.1162 |0.9974 |+0.3264 |
|pump | 1 |6 |0.9560 |+0.2105 |0.8653 |+0.2851 |
|slider | 2 |0 |0.9679 |+0.0060 |0.8743 |+0.0599 |
|slider | 2 |2 |0.8813 |+0.0916 |0.6339 |-0.0029 |
|slider | 2 |4 |0.9905 |+0.0475 |0.9500 |+0.2302 |
|slider | 2 |6 |0.8336 |+0.1377 |0.5577 |+0.0675 |
|ToyCar | 3 |1 |0.6665 |-0.1471 |0.6647 |-0.0193 |
|ToyCar | 3 |2 |0.9024 |+0.0427 |0.8195 |+0.0423 |
|ToyCar | 3 |3 |0.9814 |+0.3484 |0.9271 |+0.3750 |
|ToyCar | 3 |4 |0.9982 |+0.1537 |0.9904 |+0.3007 |
|ToyConveyor | 4 |1 |0.8454 |+0.0647 |0.7466 |+0.1041 |
|ToyConveyor | 4 |2 |0.6047 |-0.0369 |0.5448 |-0.0153 |
|ToyConveyor | 4 |3 |0.7071 |-0.0464 |0.5888 |-0.0215 |
|valve | 5 |0 |0.9876 |+0.3000 |0.9518 |+0.4348 |
|valve | 5 |2 |0.7835 |+0.1017 |0.5197 |+0.0014 |
|valve | 5 |4 |0.0000 |-0.7430 |0.0000 |-0.5197 |
%% Cell type:code id: tags:
``` python
```
%% Cell type:code id: tags:
``` python