"""Metrics to assess performance on classification task given scores.
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better.
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
import warnings
from functools import partial
from numbers import Integral, Real
import numpy as np
from scipy.integrate import trapezoid
from scipy.sparse import csr_matrix, issparse
from scipy.stats import rankdata
from ..exceptions import UndefinedMetricWarning
from ..preprocessing import label_binarize
from ..utils import (
assert_all_finite,
check_array,
check_consistent_length,
column_or_1d,
)
from ..utils._encode import _encode, _unique
from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params
from ..utils.extmath import stable_cumsum
from ..utils.multiclass import type_of_target
from ..utils.sparsefuncs import count_nonzero
from ..utils.validation import _check_pos_label_consistency, _check_sample_weight
from ._base import _average_binary_score, _average_multiclass_ovo_score
@validate_params(
{"x": ["array-like"], "y": ["array-like"]},
prefer_skip_nested_validation=True,
)
def auc(x, y):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule.
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`. For an alternative
way to summarize a precision-recall curve, see
:func:`average_precision_score`.
Parameters
----------
x : array-like of shape (n,)
X coordinates. These must be either monotonic increasing or monotonic
decreasing.
y : array-like of shape (n,)
Y coordinates.
Returns
-------
auc : float
Area Under the Curve.
See Also
--------
roc_auc_score : Compute the area under the ROC curve.
average_precision_score : Compute average precision from prediction scores.
precision_recall_curve : Compute precision-recall pairs for different
probability thresholds.
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
np.float64(0.75)
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError(
"At least 2 points are needed to compute area under curve, but x.shape = %s"
% x.shape
)
direction = 1
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("x is neither increasing nor decreasing : {}.".format(x))
area = direction * trapezoid(y, x)
if isinstance(area, np.memmap):
# Reductions such as .sum used internally in trapezoid do not return a
# scalar by default for numpy.memmap instances contrary to
# regular numpy.ndarray instances.
area = area.dtype.type(area)
return area
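# Illustrative sketch (not part of the library): ``auc`` infers the sweep
# direction from ``np.diff(x)``, so a curve supplied with decreasing x yields
# the same positive area as its increasing counterpart. The helper name below
# is hypothetical, for demonstration only.
def _demo_auc_direction():
    x = np.array([0.0, 0.5, 1.0])
    y = np.array([0.0, 0.5, 1.0])
    # Area under y = x on [0, 1] is 0.5 by the trapezoidal rule.
    assert np.isclose(auc(x, y), 0.5)
    # Reversing both arrays flips the sweep direction but not the area.
    assert np.isclose(auc(x[::-1], y[::-1]), 0.5)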
@validate_params(
{
"y_true": ["array-like"],
"y_score": ["array-like"],
"average": [StrOptions({"micro", "samples", "weighted", "macro"}), None],
"pos_label": [Real, str, "boolean"],
"sample_weight": ["array-like", None],
},
prefer_skip_nested_validation=True,
)
def average_precision_score(
y_true, y_score, *, average="macro", pos_label=1, sample_weight=None
):
"""Compute average precision (AP) from prediction scores.
AP summarizes a precision-recall curve as the weighted mean of precisions
achieved at each threshold, with the increase in recall from the previous
threshold used as the weight:
.. math::
\\text{AP} = \\sum_n (R_n - R_{n-1}) P_n
where :math:`P_n` and :math:`R_n` are the precision and recall at the nth
threshold [1]_. This implementation is not interpolated and is different
from computing the area under the precision-recall curve with the
trapezoidal rule, which uses linear interpolation and can be too
optimistic.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_classes)
True binary labels or binary label indicators.
y_score : array-like of shape (n_samples,) or (n_samples, n_classes)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by :term:`decision_function` on some classifiers).
For :term:`decision_function` scores, values greater than or equal to
zero should indicate the positive class.
average : {'micro', 'samples', 'weighted', 'macro'} or None, \
default='macro'
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
Will be ignored when ``y_true`` is binary.
pos_label : int, float, bool or str, default=1
The label of the positive class. Only applied to binary ``y_true``.
For multilabel-indicator ``y_true``, ``pos_label`` is fixed to 1.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
average_precision : float
Average precision score.
See Also
--------
roc_auc_score : Compute the area under the ROC curve.
precision_recall_curve : Compute precision-recall pairs for different
probability thresholds.
Notes
-----
.. versionchanged:: 0.19
Instead of linearly interpolating between operating points, precisions
are weighted by the change in recall since the last operating point.
References
----------
.. [1] `Wikipedia entry for the Average precision
<https://en.wikipedia.org/w/index.php?title=Information_retrieval&
oldid=793358396#Average_precision>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores)
np.float64(0.83...)
>>> y_true = np.array([0, 0, 1, 1, 2, 2])
>>> y_scores = np.array([
... [0.7, 0.2, 0.1],
... [0.4, 0.3, 0.3],
... [0.1, 0.8, 0.1],
... [0.2, 0.3, 0.5],
... [0.4, 0.4, 0.2],
... [0.1, 0.2, 0.7],
... ])
>>> average_precision_score(y_true, y_scores)
np.float64(0.77...)
"""
def _binary_uninterpolated_average_precision(
y_true, y_score, pos_label=1, sample_weight=None
):
precision, recall, _ = precision_recall_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight
)
# Return the step function integral
# The following works because the last entry of precision is
# guaranteed to be 1, as returned by precision_recall_curve.
# Due to numerical error, we can get `-0.0` and we therefore clip it.
return max(0.0, -np.sum(np.diff(recall) * np.array(precision)[:-1]))
y_type = type_of_target(y_true, input_name="y_true")
# Convert to Python primitive type to avoid NumPy type / Python str
# comparison. See https://github.com/numpy/numpy/issues/6784
present_labels = np.unique(y_true).tolist()
if y_type == "binary":
if len(present_labels) == 2 and pos_label not in present_labels:
raise ValueError(
f"pos_label={pos_label} is not a valid label. It should be "
f"one of {present_labels}"
)
elif y_type == "multilabel-indicator" and pos_label != 1:
raise ValueError(
"Parameter pos_label is fixed to 1 for multilabel-indicator y_true. "
"Do not set pos_label or set pos_label to 1."
)
elif y_type == "multiclass":
if pos_label != 1:
raise ValueError(
"Parameter pos_label is fixed to 1 for multiclass y_true. "
"Do not set pos_label or set pos_label to 1."
)
y_true = label_binarize(y_true, classes=present_labels)
average_precision = partial(
_binary_uninterpolated_average_precision, pos_label=pos_label
)
return _average_binary_score(
average_precision, y_true, y_score, average, sample_weight=sample_weight
)
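# Illustrative sketch (not part of the library): recomputing AP by hand as the
# step-function sum sum((R_n - R_{n-1}) * P_n) over the points returned by
# ``precision_recall_curve`` reproduces ``average_precision_score``. The
# helper name is hypothetical.
def _demo_average_precision_step_sum():
    y_true = np.array([0, 0, 1, 1])
    y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    precision, recall, _ = precision_recall_curve(y_true, y_scores)
    # recall is decreasing, so -np.diff(recall) gives R_n - R_{n-1} >= 0.
    ap_by_hand = -np.sum(np.diff(recall) * precision[:-1])
    assert np.isclose(ap_by_hand, average_precision_score(y_true, y_scores))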
@validate_params(
{
"y_true": ["array-like"],
"y_score": ["array-like"],
"pos_label": [Real, str, "boolean", None],
"sample_weight": ["array-like", None],
},
prefer_skip_nested_validation=True,
)
def det_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute error rates for different probability thresholds.
.. note::
This metric is used for evaluation of ranking and error tradeoffs of
a binary classification task.
Read more in the :ref:`User Guide <det_curve>`.
.. versionadded:: 0.24
Parameters
----------
y_true : ndarray of shape (n_samples,)
True binary labels. If labels are not either {-1, 1} or {0, 1}, then
pos_label should be explicitly given.
y_score : ndarray of shape (n_samples,)
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
For :term:`decision_function` scores, values greater than or equal to
zero should indicate the positive class.
pos_label : int, float, bool or str, default=None
The label of the positive class.
When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1},
``pos_label`` is set to 1, otherwise an error will be raised.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
fpr : ndarray of shape (n_thresholds,)
False positive rate (FPR) such that element i is the false positive
rate of predictions with score >= thresholds[i]. This is occasionally
referred to as false acceptance probability or fall-out.
fnr : ndarray of shape (n_thresholds,)
False negative rate (FNR) such that element i is the false negative
rate of predictions with score >= thresholds[i]. This is occasionally
referred to as false rejection or miss rate.
thresholds : ndarray of shape (n_thresholds,)
Decreasing score values.
See Also
--------
DetCurveDisplay.from_estimator : Plot DET curve given an estimator and
some data.
DetCurveDisplay.from_predictions : Plot DET curve given the true and
predicted labels.
DetCurveDisplay : DET curve visualization.
roc_curve : Compute Receiver operating characteristic (ROC) curve.
precision_recall_curve : Compute precision-recall curve.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import det_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, fnr, thresholds = det_curve(y_true, y_scores)
>>> fpr
array([0.5, 0.5, 0. ])
>>> fnr
array([0. , 0.5, 0.5])
>>> thresholds
array([0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight
)
if len(np.unique(y_true)) != 2:
raise ValueError(
"Only one class is present in y_true. Detection error "
"tradeoff curve is not defined in that case."
)
fns = tps[-1] - tps
p_count = tps[-1]
n_count = fps[-1]
# start with false positives zero
first_ind = (
fps.searchsorted(fps[0], side="right") - 1
if fps.searchsorted(fps[0], side="right") > 0
else None
)
# stop with false negatives zero
last_ind = tps.searchsorted(tps[-1]) + 1
sl = slice(first_ind, last_ind)
# reverse the output such that list of false positives is decreasing
return (fps[sl][::-1] / n_count, fns[sl][::-1] / p_count, thresholds[sl][::-1])
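# Illustrative sketch (not part of the library): on the docstring example, the
# two error rates returned by ``det_curve`` trade off against each other as
# the threshold increases. The helper name is hypothetical.
def _demo_det_curve_tradeoff():
    y_true = np.array([0, 0, 1, 1])
    y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    fpr, fnr, thresholds = det_curve(y_true, y_scores)
    # Thresholds increase; FPR never increases and FNR never decreases.
    assert np.all(np.diff(thresholds) > 0)
    assert np.all(np.diff(fpr) <= 0)
    assert np.all(np.diff(fnr) >= 0)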
def _binary_roc_auc_score(y_true, y_score, sample_weight=None, max_fpr=None):
"""Binary roc auc score."""
if len(np.unique(y_true)) != 2:
warnings.warn(
(
"Only one class is present in y_true. ROC AUC score "
"is not defined in that case."
),
UndefinedMetricWarning,
)
return np.nan
fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight)
if max_fpr is None or max_fpr == 1:
return auc(fpr, tpr)
if max_fpr <= 0 or max_fpr > 1:
raise ValueError("Expected max_fpr in range (0, 1], got: %r" % max_fpr)
# Add a single point at max_fpr by linear interpolation
stop = np.searchsorted(fpr, max_fpr, "right")
x_interp = [fpr[stop - 1], fpr[stop]]
y_interp = [tpr[stop - 1], tpr[stop]]
tpr = np.append(tpr[:stop], np.interp(max_fpr, x_interp, y_interp))
fpr = np.append(fpr[:stop], max_fpr)
partial_auc = auc(fpr, tpr)
# McClish correction: standardize result to be 0.5 if non-discriminant
# and 1 if maximal
min_area = 0.5 * max_fpr**2
max_area = max_fpr
return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area))
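# Illustrative sketch (not part of the library): the McClish correction maps
# the partial AUC onto [0.5, 1], so a perfect ranking scores 1.0 for any
# ``max_fpr`` and a chance-level ranking scores 0.5. The helper name is
# hypothetical.
def _demo_mcclish_partial_auc():
    y_true = np.array([0, 0, 1, 1])
    # Perfect ranking: every positive scores above every negative.
    y_score = np.array([0.1, 0.2, 0.8, 0.9])
    assert np.isclose(roc_auc_score(y_true, y_score, max_fpr=0.5), 1.0)
    # Chance-level ranking (constant scores) standardizes to 0.5.
    assert np.isclose(roc_auc_score(y_true, np.full(4, 0.5), max_fpr=0.5), 0.5)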
@validate_params(
{
"y_true": ["array-like"],
"y_score": ["array-like"],
"average": [StrOptions({"micro", "macro", "samples", "weighted"}), None],
"sample_weight": ["array-like", None],
"max_fpr": [Interval(Real, 0.0, 1, closed="right"), None],
"multi_class": [StrOptions({"raise", "ovr", "ovo"})],
"labels": ["array-like", None],
},
prefer_skip_nested_validation=True,
)
def roc_auc_score(
y_true,
y_score,
*,
average="macro",
sample_weight=None,
max_fpr=None,
multi_class="raise",
labels=None,
):
"""Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC) \
from prediction scores.
Note: this implementation can be used with binary, multiclass and
multilabel classification, but some restrictions apply (see Parameters).
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array-like of shape (n_samples,) or (n_samples, n_classes)
True labels or binary label indicators. The binary and multiclass cases
expect labels with shape (n_samples,) while the multilabel case expects
binary label indicators with shape (n_samples, n_classes).
y_score : array-like of shape (n_samples,) or (n_samples, n_classes)
Target scores.
* In the binary case, it corresponds to an array of shape
`(n_samples,)`. Both probability estimates and non-thresholded
decision values can be provided. The probability estimates correspond
to the **probability of the class with the greater label**,
i.e. `estimator.classes_[1]` and thus
`estimator.predict_proba(X)[:, 1]`. The decision values
correspond to the output of `estimator.decision_function(X)`.
See more information in the :ref:`User guide <roc_auc_binary>`;
* In the multiclass case, it corresponds to an array of shape
`(n_samples, n_classes)` of probability estimates provided by the
`predict_proba` method. The probability estimates **must**
sum to 1 across the possible classes. In addition, the order of the
class scores must correspond to the order of ``labels``,
if provided, or else to the numerical or lexicographical order of
the labels in ``y_true``. See more information in the
:ref:`User guide <roc_auc_multiclass>`;
* In the multilabel case, it corresponds to an array of shape
`(n_samples, n_classes)`. Probability estimates are provided by the
`predict_proba` method and the non-thresholded decision values by
the `decision_function` method. The probability estimates correspond
to the **probability of the class with the greater label for each
output** of the classifier. See more information in the
:ref:`User guide <roc_auc_multilabel>`.
average : {'micro', 'macro', 'samples', 'weighted'} or None, \
default='macro'
If ``None``, the scores for each class are returned.
Otherwise, this determines the type of averaging performed on the data.
Note: multiclass ROC AUC currently only handles the 'macro' and
'weighted' averages. For multiclass targets, `average=None` and
`average='micro'` are only implemented for `multi_class='ovr'`.
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
Will be ignored when ``y_true`` is binary.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
max_fpr : float > 0 and <= 1, default=None
If not ``None``, the standardized partial AUC [2]_ over the range
[0, max_fpr] is returned. For the multiclass case, ``max_fpr`` should
be either ``None`` or ``1.0``, as partial AUC computation is not
currently supported for multiclass.
multi_class : {'raise', 'ovr', 'ovo'}, default='raise'
Only used for multiclass targets. Determines the type of configuration
to use. The default value raises an error, so either
``'ovr'`` or ``'ovo'`` must be passed explicitly.
``'ovr'``:
Stands for One-vs-rest. Computes the AUC of each class
against the rest [3]_ [4]_. This
treats the multiclass case in the same way as the multilabel case.
Sensitive to class imbalance even when ``average == 'macro'``,
because class imbalance affects the composition of each of the
'rest' groupings.
``'ovo'``:
Stands for One-vs-one. Computes the average AUC of all
possible pairwise combinations of classes [5]_.
Insensitive to class imbalance when
``average == 'macro'``.
labels : array-like of shape (n_classes,), default=None
Only used for multiclass targets. List of labels that index the
classes in ``y_score``. If ``None``, the numerical or lexicographical
order of the labels in ``y_true`` is used.
Returns
-------
auc : float
Area Under the Curve score.
See Also
--------
average_precision_score : Area under the precision-recall curve.
roc_curve : Compute Receiver operating characteristic (ROC) curve.
RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic
(ROC) curve given an estimator and some data.
RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic
(ROC) curve given the true and predicted values.
Notes
-----
The Gini coefficient is a summary measure of the ranking ability of binary
classifiers. It is expressed in terms of the area under the ROC curve:
G = 2 * AUC - 1
where G is the Gini coefficient and AUC is the ROC-AUC score. This
normalisation ensures that random guessing yields a score of 0 in
expectation, while the score remains upper bounded by 1.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
.. [2] `Analyzing a portion of the ROC curve. McClish, 1989
<https://www.ncbi.nlm.nih.gov/pubmed/2668680>`_
.. [3] Provost, F., Domingos, P. (2000). Well-trained PETs: Improving
probability estimation trees (Section 6.2), CeDER Working Paper
#IS-00-04, Stern School of Business, New York University.
.. [4] `Fawcett, T. (2006). An introduction to ROC analysis. Pattern
Recognition Letters, 27(8), 861-874.
<https://www.sciencedirect.com/science/article/pii/S016786550500303X>`_
.. [5] `Hand, D.J., Till, R.J. (2001). A Simple Generalisation of the Area
Under the ROC Curve for Multiple Class Classification Problems.
Machine Learning, 45(2), 171-186.
<http://link.springer.com/article/10.1023/A:1010920819831>`_
.. [6] `Wikipedia entry for the Gini coefficient
<https://en.wikipedia.org/wiki/Gini_coefficient>`_
Examples
--------
Binary case:
>>> from sklearn.datasets import load_breast_cancer
>>> from sklearn.linear_model import LogisticRegression
>>> from sklearn.metrics import roc_auc_score
>>> X, y = load_breast_cancer(return_X_y=True)
>>> clf = LogisticRegression(solver="liblinear", random_state=0).fit(X, y)
>>> roc_auc_score(y, clf.predict_proba(X)[:, 1])
np.float64(0.99...)
>>> roc_auc_score(y, clf.decision_function(X))
np.float64(0.99...)
Multiclass case:
>>> from sklearn.datasets import load_iris
>>> X, y = load_iris(return_X_y=True)
>>> clf = LogisticRegression(solver="liblinear").fit(X, y)
>>> roc_auc_score(y, clf.predict_proba(X), multi_class='ovr')
np.float64(0.99...)
Multilabel case:
>>> import numpy as np
>>> from sklearn.datasets import make_multilabel_classification
>>> from sklearn.multioutput import MultiOutputClassifier
>>> X, y = make_multilabel_classification(random_state=0)
>>> clf = MultiOutputClassifier(clf).fit(X, y)
>>> # get a list of n_output containing probability arrays of shape
>>> # (n_samples, n_classes)
>>> y_pred = clf.predict_proba(X)
>>> # extract the positive columns for each output
>>> y_pred = np.transpose([pred[:, 1] for pred in y_pred])
>>> roc_auc_score(y, y_pred, average=None)
array([0.82..., 0.86..., 0.94..., 0.85..., 0.94...])
>>> from sklearn.linear_model import RidgeClassifierCV
>>> clf = RidgeClassifierCV().fit(X, y)
>>> roc_auc_score(y, clf.decision_function(X), average=None)
array([0.81..., 0.84..., 0.93..., 0.87..., 0.94...])
"""
y_type = type_of_target(y_true, input_name="y_true")
y_true = check_array(y_true, ensure_2d=False, dtype=None)
y_score = check_array(y_score, ensure_2d=False)
if y_type == "multiclass" or (
y_type == "binary" and y_score.ndim == 2 and y_score.shape[1] > 2
):
# do not support partial ROC computation for multiclass
if max_fpr is not None and max_fpr != 1.0:
raise ValueError(
"Partial AUC computation not available in "
"multiclass setting, 'max_fpr' must be"
" set to `None`, received `max_fpr={0}` "
"instead".format(max_fpr)
)
if multi_class == "raise":
raise ValueError("multi_class must be in ('ovo', 'ovr')")
return _multiclass_roc_auc_score(
y_true, y_score, labels, multi_class, average, sample_weight
)
elif y_type == "binary":
labels = np.unique(y_true)
y_true = label_binarize(y_true, classes=labels)[:, 0]
return _average_binary_score(
partial(_binary_roc_auc_score, max_fpr=max_fpr),
y_true,
y_score,
average,
sample_weight=sample_weight,
)
else: # multilabel-indicator
return _average_binary_score(
partial(_binary_roc_auc_score, max_fpr=max_fpr),
y_true,
y_score,
average,
sample_weight=sample_weight,
)
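# Illustrative sketch (not part of the library): the Gini coefficient from the
# Notes section, G = 2 * AUC - 1, rescales ROC AUC so that chance-level
# ranking scores 0 and perfect ranking scores 1. The helper name is
# hypothetical.
def _demo_gini_from_roc_auc():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    # AUC here is 0.75 (see the ``auc`` docstring example), so G = 0.5.
    gini = 2 * roc_auc_score(y_true, y_score) - 1
    assert np.isclose(gini, 0.5)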
def _multiclass_roc_auc_score(
y_true, y_score, labels, multi_class, average, sample_weight
):
"""Multiclass roc auc score.
Parameters
----------
y_true : array-like of shape (n_samples,)
True multiclass labels.
y_score : array-like of shape (n_samples, n_classes)
Target scores corresponding to probability estimates of a sample
belonging to a particular class
labels : array-like of shape (n_classes,) or None
List of labels to index ``y_score`` used for multiclass. If ``None``,
the lexical order of ``y_true`` is used to index ``y_score``.
multi_class : {'ovr', 'ovo'}
Determines the type of multiclass configuration to use.
``'ovr'``:
Calculate metrics for the multiclass case using the one-vs-rest
approach.
``'ovo'``:
Calculate metrics for the multiclass case using the one-vs-one
approach.
average : {'micro', 'macro', 'weighted'}
Determines the type of averaging performed on the pairwise binary
metric scores
``'micro'``:
Calculate metrics for the binarized-raveled classes. Only supported
for `multi_class='ovr'`.
.. versionadded:: 1.2
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account. Classes
are assumed to be uniformly distributed.
``'weighted'``:
Calculate metrics for each label, taking into account the
prevalence of the classes.
sample_weight : array-like of shape (n_samples,) or None
Sample weights.
"""
# validation of the input y_score
if not np.allclose(1, y_score.sum(axis=1)):
raise ValueError(
"Target scores need to be probabilities for multiclass "
"roc_auc, i.e. they should sum up to 1.0 over classes"
)
# validation for multiclass parameter specifications
average_options = ("macro", "weighted", None)
if multi_class == "ovr":
average_options = ("micro",) + average_options
if average not in average_options:
raise ValueError(
"average must be one of {0} for multiclass problems".format(average_options)
)
multiclass_options = ("ovo", "ovr")
if multi_class not in multiclass_options:
raise ValueError(
"multi_class='{0}' is not supported "
"for multiclass ROC AUC, multi_class must be "
"in {1}".format(multi_class, multiclass_options)
)
if average is None and multi_class == "ovo":
raise NotImplementedError(
"average=None is not implemented for multi_class='ovo'."
)
if labels is not None:
labels = column_or_1d(labels)
classes = _unique(labels)
if len(classes) != len(labels):
raise ValueError("Parameter 'labels' must be unique")
if not np.array_equal(classes, labels):
raise ValueError("Parameter 'labels' must be ordered")
if len(classes) != y_score.shape[1]:
raise ValueError(
"Number of given labels, {0}, not equal to the number "
"of columns in 'y_score', {1}".format(len(classes), y_score.shape[1])
)
if len(np.setdiff1d(y_true, classes)):
raise ValueError("'y_true' contains labels not in parameter 'labels'")
else:
classes = _unique(y_true)
if len(classes) != y_score.shape[1]:
raise ValueError(
"Number of classes in y_true not equal to the number of "
"columns in 'y_score'"
)
if multi_class == "ovo":
if sample_weight is not None:
raise ValueError(
"sample_weight is not supported "
"for multiclass one-vs-one ROC AUC, "
"'sample_weight' must be None in this case."
)
y_true_encoded = _encode(y_true, uniques=classes)
# Hand & Till (2001) implementation (ovo)
return _average_multiclass_ovo_score(
_binary_roc_auc_score, y_true_encoded, y_score, average=average
)
else:
# ovr is same as multi-label
y_true_multilabel = label_binarize(y_true, classes=classes)
return _average_binary_score(
_binary_roc_auc_score,
y_true_multilabel,
y_score,
average,
sample_weight=sample_weight,
)
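# Illustrative sketch (not part of the library): with ``multi_class='ovr'``
# and the default ``average='macro'``, the multiclass score equals the
# unweighted mean of per-class binary AUCs on the binarized labels, mirroring
# the ovr branch above. The helper name is hypothetical.
def _demo_ovr_equals_binarized_macro():
    y_true = np.array([0, 0, 1, 1, 2, 2])
    y_score = np.array([
        [0.7, 0.2, 0.1],
        [0.4, 0.3, 0.3],
        [0.1, 0.8, 0.1],
        [0.2, 0.3, 0.5],
        [0.4, 0.4, 0.2],
        [0.1, 0.2, 0.7],
    ])
    ovr = roc_auc_score(y_true, y_score, multi_class="ovr")
    y_bin = label_binarize(y_true, classes=[0, 1, 2])
    per_class = [roc_auc_score(y_bin[:, k], y_score[:, k]) for k in range(3)]
    assert np.isclose(ovr, np.mean(per_class))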
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : ndarray of shape (n_samples,)
True targets of binary classification.
y_score : ndarray of shape (n_samples,)
Estimated probabilities or output of a decision function.
pos_label : int, float, bool or str, default=None
The label of the positive class.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
fps : ndarray of shape (n_thresholds,)
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : ndarray of shape (n_thresholds,)
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : ndarray of shape (n_thresholds,)
Decreasing score values.
"""
# Check to make sure y_true is valid
y_type = type_of_target(y_true, input_name="y_true")
if not (y_type == "binary" or (y_type == "multiclass" and pos_label is not None)):
raise ValueError("{0} format is not supported".format(y_type))
check_consistent_length(y_true, y_score, sample_weight)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
assert_all_finite(y_true)
assert_all_finite(y_score)
# Filter out zero-weighted samples, as they should not impact the result
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
sample_weight = _check_sample_weight(sample_weight, y_true)
nonzero_weight_mask = sample_weight != 0
y_true = y_true[nonzero_weight_mask]
y_score = y_score[nonzero_weight_mask]
sample_weight = sample_weight[nonzero_weight_mask]
pos_label = _check_pos_label_consistency(pos_label, y_true)
# make y_true a boolean vector
y_true = y_true == pos_label
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.0
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
distinct_value_indices = np.where(np.diff(y_score))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = stable_cumsum(y_true * weight)[threshold_idxs]
if sample_weight is not None:
# express fps as a cumsum to ensure fps is increasing even in
# the presence of floating point errors
fps = stable_cumsum((1 - y_true) * weight)[threshold_idxs]
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
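# Illustrative sketch (not part of the library): cumulative counts returned by
# ``_binary_clf_curve`` on a tiny example. Sorted by decreasing score the
# samples read 0.8(+), 0.4(-), 0.35(+), 0.1(-), so tps and fps each grow one
# step at a time. The helper name is hypothetical.
def _demo_binary_clf_curve_counts():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    fps, tps, thresholds = _binary_clf_curve(y_true, y_score)
    assert np.array_equal(tps, [1, 1, 2, 2])
    assert np.array_equal(fps, [0, 1, 1, 2])
    assert np.array_equal(thresholds, [0.8, 0.4, 0.35, 0.1])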
@validate_params(
{
"y_true": ["array-like"],
"y_score": ["array-like", Hidden(None)],
"pos_label": [Real, str, "boolean", None],
"sample_weight": ["array-like", None],
"drop_intermediate": ["boolean"],
"probas_pred": [
"array-like",
Hidden(StrOptions({"deprecated"})),
],
},
prefer_skip_nested_validation=True,
)
def precision_recall_curve(
y_true,
y_score=None,
*,
pos_label=None,
sample_weight=None,
drop_intermediate=False,
probas_pred="deprecated",
):
"""Compute precision-recall pairs for different probability thresholds.
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
y axis.
The first precision and recall values are precision=class balance and recall=1.0
which corresponds to a classifier that always predicts the positive class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array-like of shape (n_samples,)
True binary labels. If labels are not either {-1, 1} or {0, 1}, then
pos_label should be explicitly given.
y_score : array-like of shape (n_samples,)
Target scores, can either be probability estimates of the positive
class, or non-thresholded measure of decisions (as returned by
`decision_function` on some classifiers).
For :term:`decision_function` scores, values greater than or equal to
zero should indicate the positive class.
pos_label : int, float, bool or str, default=None
The label of the positive class.
When ``pos_label=None``, if y_true is in {-1, 1} or {0, 1},
``pos_label`` is set to 1, otherwise an error will be raised.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
drop_intermediate : bool, default=False
Whether to drop some suboptimal thresholds which would not appear
on a plotted precision-recall curve. This is useful in order to create
lighter precision-recall curves.
.. versionadded:: 1.3
probas_pred : array-like of shape (n_samples,)
Target scores, can either be probability estimates of the positive
class, or non-thresholded measure of decisions (as returned by
`decision_function` on some classifiers).
.. deprecated:: 1.5
`probas_pred` is deprecated and will be removed in 1.7. Use
`y_score` instead.
Returns
-------
precision : ndarray of shape (n_thresholds + 1,)
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : ndarray of shape (n_thresholds + 1,)
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : ndarray of shape (n_thresholds,)
Increasing thresholds on the decision function used to compute
precision and recall, where `n_thresholds = len(np.unique(y_score))`.
See Also
--------
PrecisionRecallDisplay.from_estimator : Plot Precision Recall Curve given
a binary classifier.
PrecisionRecallDisplay.from_predictions : Plot Precision Recall Curve
using predictions from a binary classifier.
average_precision_score : Compute average precision from prediction scores.
det_curve: Compute error rates for different probability thresholds.
roc_curve : Compute Receiver operating characteristic (ROC) curve.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision
array([0.5 , 0.66666667, 0.5 , 1. , 1. ])
>>> recall
array([1. , 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([0.1 , 0.35, 0.4 , 0.8 ])
"""
# TODO(1.7): remove in 1.7 and reset y_score to be required
# Note: validate params will raise an error if probas_pred is not array-like,
# or "deprecated"
if y_score is not None and not isinstance(probas_pred, str):
raise ValueError(
"`probas_pred` and `y_score` cannot both be specified. Please use `y_score`"
" only, as `probas_pred` was deprecated in v1.5 and will be removed in v1.7."
)
if y_score is None:
warnings.warn(
(
"probas_pred was deprecated in version 1.5 and will be removed in 1.7."
"Please use ``y_score`` instead."
),