Commit c6c574a9 authored by Paul Primus's avatar Paul Primus
Browse files

add RK3 training script

parent b77042e3
......@@ -23,7 +23,8 @@ class ComplementMCMDataSet(BaseDataSet):
fmin=0,
normalize_raw=False,
normalize=None,
hop_all=False
hop_all=False,
same_type=False
):
self.data_root = data_root
self.context = context
......@@ -66,22 +67,22 @@ class ComplementMCMDataSet(BaseDataSet):
validation_set.data = (validation_set.data - mean) / std
training_sets = []
validation_sets = []
# validation_sets = []
for type_ in TRAINING_ID_MAP:
for id_ in TRAINING_ID_MAP[type_]:
if type_ != machine_type:
if type_ != machine_type or (id_ != machine_id and same_type):
t = MachineDataSet(type_, id_, mode='training', **kwargs)
t.data = (t.data - mean) / std
v = MachineDataSet(type_, id_, mode='validation', **kwargs)
v.data = (v.data - mean) / std
#v = MachineDataSet(type_, id_, mode='validation', **kwargs)
#v.data = (v.data - mean) / std
training_sets.append(t)
validation_sets.append(v)
# validation_sets.append(v)
self.training_set = torch.utils.data.ConcatDataset(training_sets)
self.validation_set = torch.utils.data.ConcatDataset(validation_sets)
# self.validation_set = torch.utils.data.ConcatDataset(validation_sets)
self.mean = mean
self.std = std
......@@ -93,7 +94,7 @@ class ComplementMCMDataSet(BaseDataSet):
return self.training_set
def validation_data_set(self):
return self.validation_set
raise NotImplementedError
def mean_std(self):
return self.mean, self.std
......
......@@ -155,6 +155,9 @@ def configuration():
num_hidden = 3
dropout_probability = 0.0
# complement set
same_type = True
debug = False
if debug:
num_workers = 0
......@@ -170,7 +173,7 @@ def configuration():
normalize_raw = True
# TODO: change default descriptor
descriptor = "ClassificationExperiment_Model:[{}_{}_{}_{}]_Training:[{}_{}_{}_{}]_Features:[{}_{}_{}_{}_{}_{}_{}]_{}".format(
descriptor = "ClassificationExperiment_Model:[{}_{}_{}_{}]_Training:[{}_{}_{}_{}]_Features:[{}_{}_{}_{}_{}_{}_{}]_Complement:[{}]{}".format(
model_class,
hidden_size,
num_hidden,
......@@ -186,6 +189,7 @@ def configuration():
hop_size,
power,
fmin,
same_type,
seed
)
......@@ -226,7 +230,8 @@ def configuration():
'normalize_raw': normalize_raw,
'power': power,
'fmin': fmin,
'hop_all': False
'hop_all': False,
'same_type': same_type
}
}
......
......@@ -9,13 +9,12 @@ class AUC(BaseLoss):
super().__init__()
self.weight = weight
def forward(self, batch_normal, batch_abnormal):
def forward(self, batch_normal):
assert batch_normal.get('scores'), "cannot compute loss without scores"
assert batch_abnormal.get('scores'), "cannot compute loss without scores"
assert batch_normal.get('scores') is not None, "cannot compute loss without scores"
normal_scores = batch_normal['scores']
abnormal_scores = batch_abnormal['scores']
normal_scores = batch_normal['scores'][batch_normal['abnormal'] == 0]
abnormal_scores = batch_normal['scores'][batch_normal['abnormal'] == 1]
tprs = torch.sigmoid(abnormal_scores[:, None] - normal_scores[None, :]).mean(dim=0)
batch_normal['tpr'] = tprs.mean()
......@@ -28,6 +27,6 @@ class AUC(BaseLoss):
batch_normal['normal_scores_mean'] = normal_scores.mean()
batch_normal['normal_scores_std'] = normal_scores.std()
batch_normal['abnormal_scores_mean'] = abnormal_scores.mean()
batch_normal['abnormal_scores_std'] = normal_scores.std()
batch_normal['abnormal_scores_std'] = abnormal_scores.std()
return batch_normal[f'{self.prefix}_loss']
return batch_normal
......@@ -7,15 +7,14 @@ class NP(BaseLoss):
def __init__(self, weight=1.0, rho=0.2, **kwargs):
super().__init__()
self.weight = weight
self.rho=0.2
self.rho = 0.2
def forward(self, batch_normal, batch_abnormal, *args, **kwargs):
def forward(self, batch_normal, *args, **kwargs):
assert batch_normal.get('scores'), "cannot compute loss without scores"
assert batch_abnormal.get('scores'), "cannot compute loss without scores"
assert batch_normal.get('scores') is not None, "cannot compute loss without scores"
normal_scores = batch_normal['scores']
abnormal_scores = batch_abnormal['scores']
normal_scores = batch_normal['scores'][batch_normal['abnormal'] == 0]
abnormal_scores = batch_normal['scores'][batch_normal['abnormal'] == 1]
with torch.no_grad():
phi = torch.kthvalue(normal_scores, int((1 - self.rho) * normal_scores.shape[0]))[0]
......@@ -30,6 +29,6 @@ class NP(BaseLoss):
batch_normal['normal_scores_mean'] = normal_scores.mean()
batch_normal['normal_scores_std'] = normal_scores.std()
batch_normal['abnormal_scores_mean'] = abnormal_scores.mean()
batch_normal['abnormal_scores_std'] = normal_scores.std()
batch_normal['abnormal_scores_std'] = abnormal_scores.std()
return batch_normal
This diff is collapsed.
This diff is collapsed.
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment