import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay

clf = ...                  # define your classifier (Decision Tree, Random Forest, etc.)
clf.fit(X, y)              # fit your classifier
y_pred = clf.predict(X)    # make predictions with your classifier

# optional: get true negative (tn), false positive (fp), false negative (fn)
# and true positive (tp) from the confusion matrix (binary classification only)
M = confusion_matrix(y, y_pred)
tn, fp, fn, tp = M.ravel()

# plotting the confusion matrix
# (plot_confusion_matrix was removed in scikit-learn 1.2; ConfusionMatrixDisplay replaces it)
ConfusionMatrixDisplay.from_estimator(clf, X, y)
plt.show()
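Once the matrix is unpacked, the four counts can be turned into the usual summary metrics. A minimal sketch, assuming the binary tn, fp, fn, tp values from above:

# derive common metrics from the binary confusion-matrix counts
accuracy  = (tp + tn) / (tp + tn + fp + fn)
precision = tp / (tp + fp)
recall    = tp / (tp + fn)    # a.k.a. sensitivity / true positive rate
f1        = 2 * precision * recall / (precision + recall)
print(f"accuracy={accuracy:.3f}, precision={precision:.3f}, recall={recall:.3f}, f1={f1:.3f}")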
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(test_Y, predictions_dt)
cm
# after creating the confusion matrix, plot it for better understanding
import matplotlib.pyplot as plt
import seaborn as sn

plt.figure(figsize=(10, 8))
# 'cmap' sets the colour scheme of the heatmap
sn.heatmap(cm, annot=True, cmap='flare', fmt='d', cbar=True)
plt.xlabel('Predicted_Label')
plt.ylabel('Truth_Label')
plt.title('Confusion Matrix - Decision Tree')
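If proportions are easier to read than raw counts, the matrix can be normalized per true class before plotting. A sketch using the same cm inputs as above (confusion_matrix accepts a normalize option; 'true' divides each row by its total):

# same plot, but each row is normalized so cells show per-class proportions
cm_norm = confusion_matrix(test_Y, predictions_dt, normalize='true')
plt.figure(figsize=(10, 8))
sn.heatmap(cm_norm, annot=True, cmap='flare', fmt='.2f', cbar=True)
plt.xlabel('Predicted_Label')
plt.ylabel('Truth_Label')
plt.title('Confusion Matrix (normalized) - Decision Tree')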
By definition, entry (i, j) in a confusion matrix is the number of
observations actually in group i but predicted to be in group j.
Scikit-Learn provides a confusion_matrix function:

from sklearn.metrics import confusion_matrix
y_actu =[2,0,2,2,0,1,1,2,2,0,1,2]
y_pred =[0,0,2,1,0,2,1,0,2,0,2,2]
confusion_matrix(y_actu, y_pred)
# Output
# array([[3, 0, 0],
#        [0, 1, 2],
#        [2, 1, 3]], dtype=int64)
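To connect the output back to the definition, each row corresponds to a true class and each column to a predicted class. A small check on the example above:

M = confusion_matrix(y_actu, y_pred)
# row i = true class, column j = predicted class
print(M[2, 0])   # 2 -> two samples of class 2 were misclassified as class 0
print(M[1, 1])   # 1 -> one sample of class 1 was classified correctly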
import seaborn as sns
from sklearn.metrics import confusion_matrix
# y_test  : actual labels or target
# y_preds : predicted labels or target
sns.heatmap(confusion_matrix(y_test, y_preds), annot=True);
# Import confusion matrix
from sklearn.metrics import confusion_matrix, classification_report

# Fit the model to the training data
# Predict the labels of the test data: y_pred
# Generate the confusion matrix and classification report
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
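The fit and predict steps referenced in the comments are not shown above; a minimal end-to-end sketch, assuming a hypothetical DecisionTreeClassifier and an existing train/test split (X_train, X_test, y_train, y_test):

from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix, classification_report

# hypothetical model; any scikit-learn classifier works the same way
model = DecisionTreeClassifier(random_state=42)
model.fit(X_train, y_train)       # fit the model to the training data
y_pred = model.predict(X_test)    # predict the labels of the test data

print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))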