Skip to content

Commit 49a154f

Browse files
author
Guillaume Lemaitre
committed
PEP8
1 parent f6d051a commit 49a154f

File tree

2 files changed

+41
-23
lines changed

2 files changed

+41
-23
lines changed

imblearn/metrics/classification.py

Lines changed: 41 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -536,6 +536,7 @@ def make_indexed_balanced_accuracy(alpha=0.1, squared=True):
536536
>>> print(gmean(y_true, y_pred, average=None))
537537
[ 0.44444444 0.44444444]
538538
"""
539+
539540
def decorate(scoring_func):
540541
@functools.wraps(scoring_func)
541542
def compute_score(*args, **kwargs):
@@ -551,14 +552,20 @@ def compute_score(*args, **kwargs):
551552
average = kwargs.get('average', 'binary')
552553
sample_weight = kwargs.get('sample_weight', None)
553554
# Compute the sensitivity and specificity
554-
dict_sen_spe = {'labels': labels, 'pos_label': pos_label,
555-
'average': average, 'sample_weight': sample_weight}
555+
dict_sen_spe = {
556+
'labels': labels,
557+
'pos_label': pos_label,
558+
'average': average,
559+
'sample_weight': sample_weight
560+
}
556561
sen, spe, _ = sensitivity_specificity_support(*args,
557562
**dict_sen_spe)
558563
# Compute the dominance
559564
dom = sen - spe
560565
return (1. + alpha * dom) * _score
566+
561567
return compute_score
568+
562569
return decorate
563570

564571

@@ -625,8 +632,7 @@ def classification_report_imbalanced(y_true,
625632
name_width = max(len(cn) for cn in target_names)
626633
width = max(name_width, len(last_line_heading), digits)
627634

628-
headers = ["pre", "rec", "spe", "f1",
629-
"geo", "iba", "sup"]
635+
headers = ["pre", "rec", "spe", "f1", "geo", "iba", "sup"]
630636
fmt = '%% %ds' % width # first column: class name
631637
fmt += ' '
632638
fmt += ' '.join(['% 9s' for _ in headers])
@@ -639,26 +645,39 @@ def classification_report_imbalanced(y_true,
639645
# Compute the different metrics
640646
# Precision/recall/f1
641647
precision, recall, f1, support = precision_recall_fscore_support(
642-
y_true, y_pred,
648+
y_true,
649+
y_pred,
643650
labels=labels,
644651
average=None,
645652
sample_weight=sample_weight)
646653
# Specificity
647-
specificity = specificity_score(y_true, y_pred, labels=labels,
648-
average=None, sample_weight=sample_weight)
654+
specificity = specificity_score(
655+
y_true,
656+
y_pred,
657+
labels=labels,
658+
average=None,
659+
sample_weight=sample_weight)
649660
# Geometric mean
650-
geo_mean = geometric_mean_score(y_pred, y_true, labels=labels,
651-
average=None, sample_weight=sample_weight)
661+
geo_mean = geometric_mean_score(
662+
y_pred,
663+
y_true,
664+
labels=labels,
665+
average=None,
666+
sample_weight=sample_weight)
652667
# Indexed balanced accuracy
653-
iba_gmean = make_indexed_balanced_accuracy(alpha=alpha, squared=True)(
654-
geometric_mean_score)
655-
iba = iba_gmean(y_pred, y_true, labels=labels, average=None,
656-
sample_weight=sample_weight)
668+
iba_gmean = make_indexed_balanced_accuracy(
669+
alpha=alpha, squared=True)(geometric_mean_score)
670+
iba = iba_gmean(
671+
y_pred,
672+
y_true,
673+
labels=labels,
674+
average=None,
675+
sample_weight=sample_weight)
657676

658677
for i, label in enumerate(labels):
659678
values = [target_names[i]]
660-
for v in (precision[i], recall[i], specificity[i],
661-
f1[i], geo_mean[i], iba[i]):
679+
for v in (precision[i], recall[i], specificity[i], f1[i], geo_mean[i],
680+
iba[i]):
662681
values += ["{0:0.{1}f}".format(v, digits)]
663682
values += ["{0}".format(support[i])]
664683
report += fmt % tuple(values)
@@ -667,12 +686,13 @@ def classification_report_imbalanced(y_true,
667686

668687
# compute averages
669688
values = [last_line_heading]
670-
for v in (np.average(precision, weights=support),
671-
np.average(recall, weights=support),
672-
np.average(specificity, weights=support),
673-
np.average(f1, weights=support),
674-
np.average(geo_mean, weights=support),
675-
np.average(iba, weights=support)):
689+
for v in (np.average(
690+
precision, weights=support), np.average(
691+
recall, weights=support), np.average(
692+
specificity, weights=support), np.average(
693+
f1, weights=support), np.average(
694+
geo_mean, weights=support), np.average(
695+
iba, weights=support)):
676696
values += ["{0:0.{1}f}".format(v, digits)]
677697
values += ['{0}'.format(np.sum(support))]
678698
report += fmt % tuple(values)

imblearn/metrics/tests/test_classification.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,6 @@
22

33
from __future__ import division, print_function
44

5-
import re
6-
75
from functools import partial
86

97
import numpy as np

0 commit comments

Comments (0)