compare two models
utils.py | 45 additions
@@ -99,3 +99,48 @@ def scaling_handler(data_frame, method="robust_scaling"):
     data_frame_scaled["label"] = labels.values
 
     return data_frame_scaled
+
+
+from sklearn.metrics import (
+    accuracy_score,
+    f1_score,
+    fbeta_score,
+    precision_score,
+    recall_score,
+)
+
+
+def get_metrics(y_true, y_pred, prefix=""):
+    metrics = {}
+    metrics[f"{prefix}accuracy"] = accuracy_score(y_true, y_pred)
+    metrics[f"{prefix}f1_macro"] = f1_score(y_true, y_pred, average="macro")
+    metrics[f"{prefix}f2_macro"] = fbeta_score(y_true, y_pred, beta=2, average="macro")
+    metrics[f"{prefix}recall_macro"] = recall_score(y_true, y_pred, average="macro")
+    metrics[f"{prefix}precision_macro"] = precision_score(
+        y_true, y_pred, average="macro"
+    )
+
+    # Per-class scores
+    f1_scores = f1_score(y_true, y_pred, average=None, zero_division=0)
+    f2_scores = fbeta_score(y_true, y_pred, beta=2, average=None, zero_division=0)
+    recall_scores = recall_score(y_true, y_pred, average=None, zero_division=0)
+    precision_scores = precision_score(y_true, y_pred, average=None, zero_division=0)
+
+    for i in range(len(f1_scores)):
+        metrics[f"{prefix}f1_class{i}"] = f1_scores[i]
+        metrics[f"{prefix}f2_class{i}"] = f2_scores[i]
+        metrics[f"{prefix}recall_class{i}"] = recall_scores[i]
+        metrics[f"{prefix}precision_class{i}"] = precision_scores[i]
+
+    # Confusion-matrix components (assumes binary labels in {0, 1})
+    TP = sum((y_true == 1) & (y_pred == 1))
+    TN = sum((y_true == 0) & (y_pred == 0))
+    FP = sum((y_true == 0) & (y_pred == 1))
+    FN = sum((y_true == 1) & (y_pred == 0))
+
+    metrics[f"{prefix}TP"] = TP
+    metrics[f"{prefix}TN"] = TN
+    metrics[f"{prefix}FP"] = FP
+    metrics[f"{prefix}FN"] = FN
+
+    return metrics
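
For context, a minimal usage sketch of the new helper as suggested by the commit title. The sample labels, the two prediction arrays, and the model_a_/model_b_ prefixes are illustrative, not part of the commit; binary 0/1 labels are used so the TP/TN/FP/FN fields apply. The prefix argument keeps both models' metrics in a single dict, which is one way the two-model comparison can be assembled:

import numpy as np

# Hypothetical data for illustration only
y_true = np.array([0, 1, 1, 0, 1, 0])
pred_a = np.array([0, 1, 0, 0, 1, 1])  # predictions from model A
pred_b = np.array([0, 1, 1, 0, 0, 0])  # predictions from model B

results = {}
results.update(get_metrics(y_true, pred_a, prefix="model_a_"))
results.update(get_metrics(y_true, pred_b, prefix="model_b_"))

# Print the shared macro scores side by side for the two models
for name in ("accuracy", "f1_macro", "f2_macro", "recall_macro", "precision_macro"):
    print(f"{name}: A={results['model_a_' + name]:.3f}, B={results['model_b_' + name]:.3f}")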