Coverage for source/plotting/classification_testing_plot_responsibility_chain.py: 97%

106 statements  

coverage.py v7.8.0, created at 2025-08-19 10:43 +0000

# plotting/classification_testing_plot_responsibility_chain.py

# global imports
import logging
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.gridspec import GridSpec
from sklearn.metrics import RocCurveDisplay

# local imports
from source.agent import ClassificationTestingStrategyHandler
from source.plotting import PlotResponsibilityChainBase

class ClassificationTestingPlotResponsibilityChain(PlotResponsibilityChainBase):
    """
    Implements a plotting responsibility chain for classification testing
    results, providing the _can_plot and _plot hooks to visualize confusion
    matrices, classification reports, and ROC curves.
    """

    # local constants
    __ADDITIONAL_REPORT_LABELS = ["accuracy", "macro avg", "weighted avg"]

    def _can_plot(self, key: str) -> bool:
        """
        Checks whether the plot can be generated for the given key.

        Parameters:
            key (str): The key to check.

        Returns:
            (bool): True if the plot can be generated, False otherwise.
        """

        return key == ClassificationTestingStrategyHandler.PLOTTING_KEY
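
    # The chain dispatch itself lives in PlotResponsibilityChainBase, which is
    # not part of this listing; it is assumed to call _plot on the first
    # handler whose _can_plot accepts the given key.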

    def _plot(self, plot_data: dict) -> plt.Axes:
        """
        Generates the classification testing plot based on the provided data.

        Parameters:
            plot_data (dict): The data to be plotted.

        Returns:
            (plt.Axes): The axes object containing the plot.
        """

        conf_matrix = plot_data.get("confusion_matrix", None)
        class_report = plot_data.get("classification_report", None)
        prediction_probabilities = plot_data.get("prediction_probabilities", None)
        true_labels = plot_data.get("true_labels", None)

        if conf_matrix is None or class_report is None or prediction_probabilities is None or true_labels is None:
            logging.warning(f"Insufficient data for plotting results under key: {ClassificationTestingStrategyHandler.PLOTTING_KEY}.")
            plt.text(0.5, 0.5, "Insufficient data for plotting",
                     ha = 'center', va = 'center', fontsize = 12)
            return plt.gca()

        additional_report = {}
        for additional_label in self.__ADDITIONAL_REPORT_LABELS:
            if additional_label in class_report:
                additional_report[additional_label] = class_report.pop(additional_label)

        fig = plt.figure(figsize = self._EXPECTED_FIGURE_SIZE)
        gs = GridSpec(2, 2, figure = fig)
        classes = list(class_report.keys())
        shortened_classes_names = [class_name[:3] for class_name in classes]
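
        # After the aggregate rows are popped, class_report holds one entry per
        # class; the key order is assumed to match the integer encoding of
        # true_labels, since the tick labels and the OvR curves below index
        # classes positionally.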

        # Plot 1: Confusion Matrix as a heatmap
        ax1 = plt.subplot(gs[0, 0])
        ax1.set_title(f"Confusion Matrix (Accuracy: {additional_report['accuracy']:.2%})")

        normalized_conf_matrix = conf_matrix.astype('float') / conf_matrix.sum(axis = 1, keepdims = True)
        normalized_conf_matrix = np.round(np.nan_to_num(normalized_conf_matrix, nan = 0.0), 2)
        ax1.imshow(normalized_conf_matrix, interpolation = 'nearest', cmap = plt.cm.GnBu)

        tick_marks = np.arange(len(classes))
        ax1.set_xticks(tick_marks)
        ax1.set_yticks(tick_marks)
        ax1.set_xticklabels(shortened_classes_names)
        ax1.set_yticklabels(shortened_classes_names)
        ax1.set_xlabel('Predicted label')
        ax1.set_ylabel('True label')
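
        # Each cell gets the raw count and, beneath it, its row-normalized
        # share; the text turns white once a count exceeds half of its row's
        # maximum, so labels stay legible on dark cells.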

        thresh = np.max(conf_matrix, axis = 1) / 2.0
        for i in range(conf_matrix.shape[0]):
            for j in range(conf_matrix.shape[1]):
                ax1.text(j, i - 0.1, format(conf_matrix[i, j], 'd'),
                         ha = "center", va = "center", fontsize = 10, weight = 'bold',
                         color = "white" if conf_matrix[i, j] > thresh[i] else "black")
                ax1.text(j, i + 0.15, f'{normalized_conf_matrix[i, j]:.2f}',
                         ha = "center", va = "center", fontsize = 8,
                         color = "white" if conf_matrix[i, j] > thresh[i] else "black")

        # Plot 2: Precision, Recall, F1 Score Bar Chart
        ax2 = plt.subplot(gs[1, 0])
        precision_scores = []
        recall_scores = []
        f1_scores = []

        for metrics_dict in class_report.values():
            precision_scores.append(metrics_dict["precision"])
            recall_scores.append(metrics_dict["recall"])
            f1_scores.append(metrics_dict["f1-score"])

        shift = 0.2
        precision_bars = ax2.bar(tick_marks - shift, precision_scores, shift, label = 'Precision')
        recall_bars = ax2.bar(tick_marks, recall_scores, shift, label = 'Recall')
        f1_bars = ax2.bar(tick_marks + shift, f1_scores, shift, label = 'F1-score')
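
        # Value labels sit just above each bar; bars taller than 0.9 get the
        # label flipped inside the bar instead, so it does not clip at the
        # ylim of 1.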

        for i, (precision_bar, recall_bar, f1_bar) in enumerate(zip(precision_bars, recall_bars, f1_bars)):
            ax2.text(precision_bar.get_x() + (precision_bar.get_width() / 2),
                     precision_bar.get_height() + 0.01 if precision_bar.get_height() < 0.9
                     else precision_bar.get_height() - 0.01,
                     f'{precision_scores[i]:.3f}',
                     ha = 'center', va = 'bottom' if precision_bar.get_height() < 0.9 else 'top',
                     rotation = 90, fontsize = 8, weight = 'bold')
            ax2.text(recall_bar.get_x() + (recall_bar.get_width() / 2),
                     recall_bar.get_height() + 0.01 if recall_bar.get_height() < 0.9
                     else recall_bar.get_height() - 0.01,
                     f'{recall_scores[i]:.3f}',
                     ha = 'center', va = 'bottom' if recall_bar.get_height() < 0.9 else 'top',
                     rotation = 90, fontsize = 8, weight = 'bold')
            ax2.text(f1_bar.get_x() + (f1_bar.get_width() / 2),
                     f1_bar.get_height() + 0.01 if f1_bar.get_height() < 0.9
                     else f1_bar.get_height() - 0.01,
                     f'{f1_scores[i]:.3f}',
                     ha = 'center', va = 'bottom' if f1_bar.get_height() < 0.9 else 'top',
                     rotation = 90, fontsize = 8, weight = 'bold')

        ax2.set_title('Classification metrics by class')
        ax2.set_xticks(tick_marks)
        ax2.set_xticklabels(shortened_classes_names)
        ax2.set_xlabel('Classes')
        ax2.set_ylabel('Score')
        ax2.set_ylim([0, 1])
        ax2.legend(fontsize = 'x-small')

        # Plot 3: OvR-ROC curves
        ax3 = plt.subplot(gs[0, 1])
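
        # One-vs-Rest: each class is binarized against all others and scored
        # with its predicted-probability column; the chance diagonal is drawn
        # only on the last iteration so it appears once in the legend.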

        for i, class_name in enumerate(classes):
            y_true_class_binary = (true_labels == i).astype(int)
            y_score = prediction_probabilities[:, i]
            RocCurveDisplay.from_predictions(y_true_class_binary, y_score, name = class_name,
                                             ax = ax3, plot_chance_level = (i == len(classes) - 1))

        ax3.set_title('One-vs-Rest ROC curves')
        ax3.set_xlabel('False positive rate')
        ax3.set_ylabel('True positive rate')
        ax3.grid(alpha = 0.3)
        ax3.legend(loc = "lower right", fontsize = 'x-small')

        # Plot 4: Macro avg and weighted avg
        ax4 = plt.subplot(gs[1, 1])
        additional_labels = list(additional_report.keys())[1:]
        precision_scores = []
        recall_scores = []
        f1_scores = []
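
        # The report's "accuracy" entry is a scalar, so the isinstance check
        # below keeps only the "macro avg" and "weighted avg" dicts; the [1:]
        # slice above drops "accuracy" from the tick labels on the same
        # assumption that it is present and listed first.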

        for metrics in additional_report.values():
            if isinstance(metrics, dict):
                precision_scores.append(metrics['precision'])
                recall_scores.append(metrics['recall'])
                f1_scores.append(metrics['f1-score'])

        x = np.arange(len(additional_labels))
        precision_bars = ax4.bar(x - shift, precision_scores, shift, label = 'Precision')
        recall_bars = ax4.bar(x, recall_scores, shift, label = 'Recall')
        f1_bars = ax4.bar(x + shift, f1_scores, shift, label = 'F1-score')

        for i, (precision_bar, recall_bar, f1_bar) in enumerate(zip(precision_bars, recall_bars, f1_bars)):
            ax4.text(precision_bar.get_x() + (precision_bar.get_width() / 2),
                     precision_bar.get_height() + 0.01 if precision_bar.get_height() < 0.9
                     else precision_bar.get_height() - 0.01,
                     f'{precision_scores[i]:.3f}',
                     ha = 'center', va = 'bottom' if precision_bar.get_height() < 0.9 else 'top',
                     rotation = 90, fontsize = 8, weight = 'bold')
            ax4.text(recall_bar.get_x() + (recall_bar.get_width() / 2),
                     recall_bar.get_height() + 0.01 if recall_bar.get_height() < 0.9
                     else recall_bar.get_height() - 0.01,
                     f'{recall_scores[i]:.3f}',
                     ha = 'center', va = 'bottom' if recall_bar.get_height() < 0.9 else 'top',
                     rotation = 90, fontsize = 8, weight = 'bold')
            ax4.text(f1_bar.get_x() + (f1_bar.get_width() / 2),
                     f1_bar.get_height() + 0.01 if f1_bar.get_height() < 0.9
                     else f1_bar.get_height() - 0.01,
                     f'{f1_scores[i]:.3f}',
                     ha = 'center', va = 'bottom' if f1_bar.get_height() < 0.9 else 'top',
                     rotation = 90, fontsize = 8, weight = 'bold')

        ax4.set_title('Macro avg and weighted avg')
        ax4.set_xticks(x)
        ax4.set_xticklabels(additional_labels)
        ax4.set_xlabel('Metrics')
        ax4.set_ylabel('Score')
        ax4.set_ylim([0, 1])
        ax4.legend(fontsize = 'x-small')
        plt.tight_layout()

        return plt.gca()
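
A minimal usage sketch for context (not part of the covered module): it builds
the plot_data dictionary that _plot expects from a toy scikit-learn classifier
and calls the protected hooks directly. The zero-argument constructor and the
direct hook calls are assumptions, since PlotResponsibilityChainBase and its
dispatch API are outside this listing.

# hypothetical driver script, illustrative only
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix

from source.agent import ClassificationTestingStrategyHandler
from source.plotting.classification_testing_plot_responsibility_chain import (
    ClassificationTestingPlotResponsibilityChain)

# fit a small classifier so all four inputs can be derived from it
X, y = load_iris(return_X_y = True)
model = LogisticRegression(max_iter = 1000).fit(X, y)
predictions = model.predict(X)

plot_data = {
    "confusion_matrix": confusion_matrix(y, predictions),
    "classification_report": classification_report(y, predictions, output_dict = True),
    "prediction_probabilities": model.predict_proba(X),
    "true_labels": y,
}

handler = ClassificationTestingPlotResponsibilityChain()  # assumes a no-arg init
if handler._can_plot(ClassificationTestingStrategyHandler.PLOTTING_KEY):
    handler._plot(plot_data)
    plt.show()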