diff --git a/.github/workflows/python-app.yml b/.github/workflows/python-app.yml index 9d31f43..ebc04db 100644 --- a/.github/workflows/python-app.yml +++ b/.github/workflows/python-app.yml @@ -9,7 +9,9 @@ name: Unit Tests on: push: - branches: [ main, docs_tests ] # run when anything is pushed to these branches + branches: + - main + - docs_tests # run when anything is pushed to these branches pull_request: branches: [ main ] # run for the code submitted as a PR to these branches @@ -61,7 +63,6 @@ jobs: fail_ci_if_error: true verbose: true version: "v0.1.15" - codecov: token: ${{ secrets.CODECOV_TOKEN }} build_docs: diff --git a/MetricsReloaded/metrics/calibration_measures.py b/MetricsReloaded/metrics/calibration_measures.py index 81008eb..b53d30e 100644 --- a/MetricsReloaded/metrics/calibration_measures.py +++ b/MetricsReloaded/metrics/calibration_measures.py @@ -34,12 +34,6 @@ import warnings # from metrics.pairwise_measures import CacheFunctionOutput from MetricsReloaded.utility.utils import ( - CacheFunctionOutput, - max_x_at_y_more, - max_x_at_y_less, - min_x_at_y_more, - min_x_at_y_less, - trapezoidal_integration, one_hot_encode, median_heuristic ) @@ -51,6 +45,25 @@ class CalibrationMeasures(object): + """ + Class allowing the derivation of calibration measures given probability input: + The possible metrics are: + + * expected calibration error (ece) + * Brier Score + * Root Brier score + * Logarithmic score + * Class wise expectation calibration error + * Kernel based ECE + * negative log likelihood + + :param pred_proba: predicted probabilities + :param ref: reference + :param case: if required list of cases to consider + :param measures: list of measures to extract + :param empty: flag indicating whether there are empty references + :param dict_args: dictionary with additional arguments for the metrics if needed + """ def __init__( self, pred_proba, @@ -89,7 +102,7 @@ def class_wise_expectation_calibration_error(self): .. 
math:: - cwECE = \dfrac{1}{K}\sum_{k=1}^{K}\sum_{i=1}^{N}\dfrac{\vert B_{i,k} \vert}{N} \left(y_{k}(B_{i,k}) - p_{k}(B_{i,k})\right) + cwECE = \\dfrac{1}{K}\sum_{k=1}^{K}\sum_{i=1}^{N}\\dfrac{\\vert B_{i,k} \\vert}{N} \\left(y_{k}(B_{i,k}) - p_{k}(B_{i,k})\\right) :return: cwece """ @@ -103,14 +116,14 @@ def class_wise_expectation_calibration_error(self): range_values = np.arange(0, 1.00001, step) list_values = [] numb_samples = self.pred.shape[0] - class_pred = np.argmax(self.pred, 1) + #class_pred = np.argmax(self.pred, 1) n_classes = self.pred.shape[1] for k in range(n_classes): list_values_k = [] - for (l, u) in zip(range_values[:-1], range_values[1:]): + for (lo, up) in zip(range_values[:-1], range_values[1:]): pred_k = self.pred[:, k] ref_tmp = np.where( - np.logical_and(pred_k > l, pred_k <= u), + np.logical_and(pred_k > lo, pred_k <= up), self.ref, np.ones_like(self.ref) * -1, ) @@ -121,7 +134,7 @@ def class_wise_expectation_calibration_error(self): nsamples = np.size(ref_sel) prop = np.sum(ref_selk) / nsamples pred_tmp = np.where( - np.logical_and(pred_k > l, pred_k <= u), + np.logical_and(pred_k > lo, pred_k <= up), pred_k, np.ones_like(pred_k) * -1, ) @@ -146,7 +159,7 @@ def expectation_calibration_error(self): .. 
math:: - ECE = \sum_{m=1}^{M} \dfrac{|B_m|}{n}(\dfrac{1}{|B_m|}\sum_{i \in B_m}1(pred_ik==ref_ik)-\dfrac{1}{|B_m|}\sum_{i \in B_m}pred_i) + ECE = \sum_{m=1}^{M} \dfrac{|B_m|}{n}(\dfrac{1}{|B_m|}\sum_{i \in B_m}1(pred_{ik}==ref_{ik})-\dfrac{1}{|B_m|}\sum_{i \in B_m}pred_i) :return: ece @@ -161,9 +174,9 @@ def expectation_calibration_error(self): list_values = [] numb_samples = 0 pred_prob = self.pred[:,1] - for (l, u) in zip(range_values[:-1], range_values[1:]): + for (lo, up) in zip(range_values[:-1], range_values[1:]): ref_tmp = np.where( - np.logical_and(pred_prob > l, pred_prob <= u), + np.logical_and(pred_prob > lo, pred_prob <= up), self.ref, np.ones_like(self.ref) * -1, ) @@ -171,7 +184,7 @@ def expectation_calibration_error(self): nsamples = np.size(ref_sel) prop = np.sum(ref_sel) / nsamples pred_tmp = np.where( - np.logical_and(pred_prob > l, pred_prob <= u), + np.logical_and(pred_prob > lo, pred_prob <= up), pred_prob, np.ones_like(pred_prob) * -1, ) @@ -193,7 +206,7 @@ def maximum_calibration_error(self): .. 
math:: - MCE = max(|\dfrac{1}{|B_m|}\sum_{i \in B_m}1(pred_ik==ref_ik)-\dfrac{1}{|B_m|}\sum_{i \in B_m}pred_i|) + MCE = max(|\dfrac{1}{|B_m|}\sum_{i \in B_m}1(pred_{ik}==ref_{ik})-\dfrac{1}{|B_m|}\sum_{i \in B_m}pred_i|) :return: mce @@ -206,11 +219,10 @@ def maximum_calibration_error(self): step = 1.0 / nbins range_values = np.arange(0, 1.00001, step) list_values = [] - numb_samples = 0 pred_prob = self.pred[:,1] - for (l, u) in zip(range_values[:-1], range_values[1:]): + for (lo, up) in zip(range_values[:-1], range_values[1:]): ref_tmp = np.where( - np.logical_and(pred_prob > l, pred_prob <= u), + np.logical_and(pred_prob > lo, pred_prob <= up), self.ref, np.ones_like(self.ref) * -1, ) @@ -218,7 +230,7 @@ def maximum_calibration_error(self): nsamples = np.size(ref_sel) prop = np.sum(ref_sel) / nsamples pred_tmp = np.where( - np.logical_and(pred_prob > l, pred_prob <= u), + np.logical_and(pred_prob > lo, pred_prob <= up), pred_prob, np.ones_like(pred_prob) * -1, ) @@ -274,13 +286,12 @@ def logarithmic_score(self): .. math:: - LS = 1/N\sum_{i=1}^{N}\log{pred_ik}ref_{ik} + LS = 1/N\sum_{i=1}^{N}\log{pred_{ik}}ref_{ik} :return: ls """ eps = 1e-10 log_pred = np.log(self.pred + eps) - to_log = self.pred[np.arange(log_pred.shape[0]),self.ref] to_sum = log_pred[np.arange(log_pred.shape[0]),self.ref] ls = np.mean(to_sum) return ls @@ -289,6 +300,9 @@ def distance_ij(self,i,j): """ Determines the euclidean distance between two vectors of prediction for two samples i and j + :param i: index of first sample + :param j: index of second sample with which to calculate distance + :return: distance """ pred_i = self.pred[i,:] @@ -299,7 +313,10 @@ def distance_ij(self,i,j): def kernel_calculation(self, i,j): """ - Defines the kernel value for two samples i and j with the following definition for k(x_i,x_j) + Defines the kernel value for two samples i and j with the following definition for :math:`k(x_i,x_j)` + + :param i: index of first sample + :param j: index of second sample .. 
math:: @@ -414,13 +431,16 @@ def gamma_ik(self, i, k): """ Definition of gamma value for sample i class k of the predictions + :param i: index of the sample + :param k: index of the class + .. math:: - gamma_{ik} = \Gamma(pred_{ik}/h + 1) + \gamma_{ik} = \Gamma(pred_{ik}/h + 1) where h is the bandwidth value set as default to 0.5 - :return gamma_ik + :return: gamma_ik """ pred_ik = self.pred[i, k] @@ -436,6 +456,9 @@ def dirichlet_kernel(self, j, i): """ Calculation of Dirichlet kernel value for predictions of samples i and j + :param i: index of first sample to consider + :param j: index of second sample to consider + .. math:: k_{Dir}(x_j,x_i) = \dfrac{\Gamma(\sum_{k=1}^{K}\\alpha_{ik})}{\prod_{k=1}^{K}\\alpha_{ik}}\prod_{k=1}^{K}x_jk^{\\alpha_{ik}-1} @@ -470,10 +493,10 @@ def negative_log_likelihood(self): .. math:: - NLL = -\dfrac{1}{N}\sum_{i=1}^{N}\sum_{k=1}^{C} y_{ik} \dot log(p_{i,k}) + NLL = -\dfrac{1}{N}\sum_{i=1}^{N}\sum_{k=1}^{C} y_{ik}\log(p_{i,k}) - where :math: `y_{ik}` the outcome is 1 if the class of :math: `y_{i}` is k and :math: `p_{ik}` is the predicted - probability for sample :math: `x_i` and class k + where :math:`y_{ik}` the outcome is 1 if the class of :math:`y_{i}` is k and :math:`p_{ik}` is the predicted + probability for sample :math:`x_i` and class k :return: NLL @@ -485,7 +508,11 @@ def negative_log_likelihood(self): return nll def to_dict_meas(self, fmt="{:.4f}"): - """Given the selected metrics provides a dictionary with relevant metrics""" + """ + Given the selected metrics provides a dictionary with relevant metrics + + :return: result_dict dictionary of results + """ result_dict = {} for key in self.measures: result = self.measures_dict[key][0]() diff --git a/MetricsReloaded/metrics/pairwise_measures.py b/MetricsReloaded/metrics/pairwise_measures.py index efa2543..3679e3e 100755 --- a/MetricsReloaded/metrics/pairwise_measures.py +++ b/MetricsReloaded/metrics/pairwise_measures.py @@ -39,8 +39,6 @@ import warnings import numpy as np 
from scipy import ndimage -from functools import partial -from skimage.morphology import skeletonize from MetricsReloaded.utility.utils import ( one_hot_encode, compute_center_of_mass, @@ -50,9 +48,6 @@ ) # from assignment_localization import AssignmentMapping -from scipy.spatial.distance import cdist -import pandas as pd -from scipy.optimize import linear_sum_assignment as lsa __all__ = [ @@ -64,9 +59,19 @@ class MultiClassPairwiseMeasures(object): """ - Class dealing with measures of direct multi-class such as MCC, Cohen's kappa, Expected cost - or balanced accuracy - + Class dealing with measures of direct multi-class. Included metrics are: + + * Matthews Correlation Coefficient (MCC) + * Weithed Cohens kappa + * Balanced accuracy + * Expected Cost + * Normalised expected cost + + :param pred: Prediction + :param ref: Reference + :param list_values: List of label values to consider + :param measures: list of measures to extract + :param dict_args: dictionary of additional arguments for the metrics """ @@ -81,9 +86,26 @@ def __init__(self, pred, ref, list_values, measures=[], dict_args={}): "wck": (self.weighted_cohens_kappa, "WCK"), "ba": (self.balanced_accuracy, "BAcc"), "ec": (self.expected_cost, "EC"), + "nec": (self.normalised_expected_cost,"NEC"), + } def expected_cost(self): + """ + Calculates the expected cost defined as: + + Luciana Ferrer - Analysis and comparison of classification metrics - https://arxiv.org/pdf/2209.05355 + + .. math:: + + EC = \sum_{r}\sum_p c_{rp} P_rD_{rp} + + where :math: `c_{rp}` {is the cost of misclassifying class r as class p. 
:math: `P_r` is the probability of + class r in the reference data, :math: `D_{rp}` is the fraction of samples of class r that are classified as + class p + + + """ cm = self.confusion_matrix() priors = np.sum(cm, 0) / np.sum(cm) numb_perc = np.sum(cm, 0) @@ -100,6 +122,9 @@ def expected_cost(self): return ec def best_naive_ec(self): + """ + Calculate the naive expected cost that can be used for normalisation purposes + """ cm = self.confusion_matrix() priors = np.sum(cm, 0) / np.sum(cm) prior_matrix = np.tile(priors, [cm.shape[0], 1]) @@ -115,6 +140,9 @@ def best_naive_ec(self): return np.min(total_cost) def normalised_expected_cost(self): + """ + Calculates the normalised expected cost as the ratio of the expected cost to the naive expected cost. + """ naive_cost = self.best_naive_ec() ec = self.expected_cost() return ec / naive_cost @@ -230,7 +258,11 @@ def weighted_cohens_kappa(self): return weighted_cohens_kappa def to_dict_meas(self, fmt="{:.4f}"): - """Given the selected metrics provides a dictionary with relevant metrics""" + """ + + Given the selected metrics provides a dictionary with relevant metrics + + """ result_dict = {} for key in self.measures: result = self.measures_dict[key][0]() @@ -239,6 +271,43 @@ def to_dict_meas(self, fmt="{:.4f}"): class BinaryPairwiseMeasures(object): + """ + Class allowing for the derivation of pairwise measures when using binary input, measures include: + + * accuracy + * net benefit treated + * normalised expected cost + * balanced accuracy + * cohen's kappa + * positive likelihood ratio + * positive predictive value + * negative predictive value + * sensitivity + * specificity + * intersection over union + * youden index + * intersection over reference + * fbeta + * Dice score + * centreline Dice + * Matthew Correlation coefficient + * Average symmetric surface distance + * Mean Average surface distance + * Hausdorff distance + * Percentile of Hausdorff distance + * Normalised surface distance + * boundary IoU + * 
absolute volume difference ratio + + Input includes: + :param pred: Prediction + :param ref: Reference + :param measures: list of measures to extract + :param connectivity_type: Type of connectivity to use + :param pixdim: list of pixel dimensions + :param empty: + :param dict_args: Dictionary with additional arguments for the different metrics + """ def __init__( self, pred, @@ -298,6 +367,12 @@ def __init__( self.dict_args = dict_args def calculate_worse_dist(self): + """ + From an image for which pixel dimensions and full shape is known, calculates the worst possible distance value. + This is to be used when distance cannot be calculated due to reference or prediction being empty and the worst + value assigned to the metric + :return max_dist: maximum distance for the given case + """ shape = self.ref.shape pixdim = self.pixdim if pixdim is not None: @@ -401,7 +476,7 @@ def n_pos_pred(self): :return: n_pos_pred """ n_pos_pred = np.sum(self.pred) - return np.sum(self.pred) + return n_pos_pred @CacheFunctionOutput def n_neg_pred(self): @@ -657,6 +732,15 @@ def matthews_correlation_coefficient(self): return mcc def expected_matching_ck(self): + """ + Derives p_e for the cohen's kappa calculation. p_e, the expected chance matching is defined as + + .. math:: + + p_e = \sum_k \dfrac{n_{k\\text{ref}}}{N}\dfrac{n_{k\\text{pred}}}{N} + + :return: p_e + """ list_values = np.unique(self.ref) p_e = 0 for val in list_values: @@ -681,7 +765,7 @@ def cohens_kappa(self): CK = \dfrac{p_o - p_e}{1-p_e} - where :math: `p_e = ` expected chance matching and :math: `p_o = `observed accuracy + where :math:`p_e =` expected chance matching and :math:` p_o =` observed accuracy Cohen, J. 
A coefficient of agreement for nominal scales - Educational and Psychological Measurement (1960) 20 37-46 @@ -784,10 +868,11 @@ def dsc(self): ..math:: + DSC = \dfrac{2TP}{2TP+FP+FN} - This is also F:math:`{\\beta}` for :math:`{\\beta}`=1 + This is also F:math:\`{\\beta}` for :math:\`{\\beta}`=1 :return: dsc @@ -906,6 +991,7 @@ def fppi(self): image, assuming that the cases are collated on the last axis of the array Bram Van Ginneken, Samuel G Armato III, Bartjan de Hoop, Saskia van Amelsvoort-van de Vorst, Thomas Duindam, Meindert Niemeijer, Keelin Murphy, Arnold Schilham, Alessandra Retico, Maria Evelina Fantacci, et al. Comparing and combining algorithms for computer-aided detection of pulmonary nodules in computed tomography scans: the anode09 study. Medical image analysis, 14(6):707–722, 2010. + Andriy I Bandos, Howard E Rockette, Tao Song, and David Gur. Area under the free-response roc curve (froc) and a related summary index. Biometrics, 65(1):247–256, 2009. """ @@ -924,7 +1010,7 @@ def intersection_over_reference(self): .. math:: - IoR = \dfrac{| \text{Pred} \cap \text{Ref} |}{| Ref |} + IoR = \dfrac{| \\text{Pred} \cap \\text{Ref} |}{| Ref |} :return: IoR @@ -962,8 +1048,7 @@ def com_dist(self): of mass of the reference and prediction. 
- :return: Euclidean distance between centre of mass when reference and prediction not empty - -1 otherwise + :return: Euclidean distance between centre of mass when reference and prediction not empty -1 otherwise """ @@ -1133,7 +1218,8 @@ def boundary_iou(self): B_{IoU}(A,B) = \dfrac{| A_{d} \cap B_{d} |}{|A_d| + |B_d| - |A_d \cap B_d|} - where :math:A_d are the pixels of A within a distance d of the boundary + where :math:`A_d` are the pixels of A within a distance d of the boundary + :return: boundary_iou """ @@ -1188,8 +1274,6 @@ def border_distance(self): """ border_ref = MorphologyOps(self.ref, self.connectivity).border_map() border_pred = MorphologyOps(self.pred, self.connectivity).border_map() - oppose_ref = 1 - self.ref - oppose_pred = 1 - self.pred distance_ref = ndimage.distance_transform_edt( 1 - border_ref, sampling=self.pixdim ) @@ -1317,7 +1401,8 @@ def measured_masd(self): .. math:: - MASD(A,B) = \dfrac{1}{2}(\dfrac{\sum_{a\in A}d(a,B)}{|A|} + \dfrac{\sum_{b\in B}d(b,A)}{|B|}) + MASD(A,B) = \dfrac{1}{2}\dfrac{\sum_{a\in A}d(a,B)}{|A|} + \dfrac{1}{2}\dfrac{\sum_{b\in B}d(b,A)}{|B|} + :return: masd @@ -1352,6 +1437,11 @@ def measured_hausdorff_distance_perc(self): return hausdorff_distance_perc def to_dict_meas(self, fmt="{:.4f}"): + """ + Transform to a dictionary the results of the different calculated measures + + :return: result_dict + """ result_dict = {} for key in self.measures: if len(self.measures_dict[key]) == 2: diff --git a/MetricsReloaded/metrics/prob_pairwise_measures.py b/MetricsReloaded/metrics/prob_pairwise_measures.py index aa5a88f..7d95898 100644 --- a/MetricsReloaded/metrics/prob_pairwise_measures.py +++ b/MetricsReloaded/metrics/prob_pairwise_measures.py @@ -34,8 +34,6 @@ CacheFunctionOutput, max_x_at_y_more, max_x_at_y_less, - min_x_at_y_more, - min_x_at_y_less, trapezoidal_integration, ) @@ -46,6 +44,28 @@ class ProbabilityPairwiseMeasures(object): + """ + Class defining all the pairwise measures based on probability predictions 
+ The following measures can be optained: + - net benefit treated + - auroc + - froc + - average precision + - sensitivity at specificity + - specificity at sensitivity + - sensitivity at ppv + - ppv at sensitivity + - fppi at sensitivity + - sensitivity at ppv + + :param pred_proba: predicted probabilities + :param ref_proba: reference probabilities + :param case: + :param measures: list of the measures to extract + :param empty: + :param dict_args: Dictiionary with the necessary arguments for the different measures if needed + + """ def __init__( self, pred_proba, @@ -81,30 +101,69 @@ def __init__( @CacheFunctionOutput def fp_thr(self, thresh): + """ + Given a threshold probability, return the number of false positive elements: + + :param thresh: Threshold to apply to the probability input + :return fp_thr: Number of FP elements when thresholding the probability to consider things positives at thresh + """ return np.sum(self.__fp_map_thr(thresh)) @CacheFunctionOutput def fn_thr(self, thresh): + """ + Given a threshold probability to determine positive samples, return the number of false negative elements: + + :param thresh: Threshold to apply to the probability input + :return fn_thr: Number of FN elements when thresholding the probability to consider things positives at thresh + """ return np.sum(self.__fn_map_thr(thresh)) @CacheFunctionOutput def tp_thr(self, thresh): + """ + Given a threshold probability to determine positive samples, return the number of true positive elements: + + :param thresh: Threshold to apply to the probability input + :return tp_thr: Number of TP elements when thresholding the probability to consider things positives at thresh + """ return np.sum(self.__tp_map_thr(thresh)) @CacheFunctionOutput def tn_thr(self, thresh): + """ + Given a threshold probability to determine positive samples, return the number of true negative elements: + + :param thresh: Threshold to apply to the probability input + :return tn_thr: Number of TN elements 
when thresholding the probability to consider things positives at thresh + """ return np.sum(self.__tn_map_thr(thresh)) @CacheFunctionOutput def n_pos_ref(self): + """ + Derive the number of positve elements in the reference + + :return: number of elements in reference + """ return np.sum(self.ref) @CacheFunctionOutput def n_pos_pred(self): + """ + Derive the number of positive elements in the prediction + + :return: number of elements in prediction + """ return np.sum(self.pred) @CacheFunctionOutput def n_neg_ref(self): + """ + Derive the number of negative elements in the reference + + :return number of negative elements in reference + """ return np.sum(1 - self.ref) @CacheFunctionOutput @@ -115,15 +174,17 @@ def all_multi_threshold_values( Function defining the list of values for ppv, sensitivity, specificity and FPPI according to a list of probabilistic thresholds. The thresholds are defined to obtain equal bin sizes The default maximum number of thresholds is 1500 + + :return: unique_new_thresh, list_sens, list_spec, list_ppv, list_ffpi """ unique_thresh, unique_counts = np.unique(self.pred, return_counts=True) - if len(unique_thresh) < max_number_thresh: + if np.size(self.ref) < max_number_samples: unique_new_thresh = unique_thresh - elif np.size(self.ref) < max_number_samples: + elif len(unique_thresh) < max_number_thresh: unique_new_thresh = unique_thresh else: - numb_thresh_temp = np.size(self.ref) / max_number_samples - numb_samples_temp = np.size(self.pred) / max_number_thresh + numb_thresh_temp = np.ceil(np.size(self.ref) / max_number_samples) + numb_samples_temp = np.ceil(np.size(self.pred) / max_number_thresh) unique_new_thresh = [0] current_count = 0 @@ -154,6 +215,10 @@ def all_multi_threshold_values( def __fp_map_thr(self, thresh): """ Map of FP given a specific threshold value + + :param thresh: threshold at which to consider an element of the prediction probability map as positive + + :return: FP map at given threshold """ pred_bin = self.pred >= 
thresh return np.asarray((pred_bin - self.ref) > 0.0, dtype=np.float32) @@ -162,7 +227,9 @@ def __fn_map_thr(self, thresh): """ This function calculates the false negative map based on a threshold - :return: FN map + :param thresh: threshold at which to consider an element of the prediction probability map as positive + + :return: FN map at given threshold """ pred_bin = self.pred >= thresh return np.asarray((self.ref - pred_bin) > 0.0, dtype=np.float32) @@ -171,6 +238,8 @@ def __tp_map_thr(self, thresh): """ TP map given a specified threshold + :param thresh: threshold at which to consider an element of the prediction probability map as positive + :return: TP map at specified threshold """ pred_bin = self.pred >= thresh @@ -180,6 +249,8 @@ def __tn_map_thr(self, thresh): """ TN map given a specified threshold + :param thresh: threshold at which to consider an element of the prediction probability map as positive + :return: TN map at specified threshold """ pred_bin = self.pred >= thresh @@ -189,16 +260,22 @@ def positive_predictive_values_thr(self, thresh): """ PPV given a specified threshold + :param thresh: threshold at which to consider an element of the prediction probability map as positive + :return: PPV at specified threshold """ - if self.flag_empty: - return -1 + if self.flag_ref_empty and self.flag_pred_empty: + return np.nan + if self.flag_ref_empty and thresh > np.max(np.reshape(self.pred,[1,-1])): + return np.nan return self.tp_thr(thresh) / (self.tp_thr(thresh) + self.fp_thr(thresh)) def specificity_thr(self, thresh): """ Specificity given a specified threshold + :param thresh: threshold at which to consider an element of the prediction probability map as positive + :return: Specificity at specified threshold """ return self.tn_thr(thresh) / self.n_neg_ref() @@ -207,21 +284,36 @@ def sensitivity_thr(self, thresh): """ Sensitivity given a specified threshold + :param thresh: threshold at which to consider an element of the prediction probability map 
as positive + :return: Sensitivity at specified threshold """ + if self.flag_ref_empty: + return np.nan return self.tp_thr(thresh) / self.n_pos_ref() def fppi_thr(self, thresh): + """ + For the list of individual cases, calculate for a chosen threshold the number of FP elements and average across the cases: + + :param thresh: Threshold at which predictions are considered positives + :return fppi: Average Number of FP per image given a specified probability threshold + + """ if self.case is not None: list_sum = [] - for f in range(np.max(self.case)): - ind_case = np.where(self.case == f)[0] + # print(np.max(self.case)) + for f in range(np.max(self.case)+1): + # print(np.where(self.case==f), self.case, f) + ind_case = np.where(self.case == f)[0][0] + print(ind_case, np.asarray(self.pred[ind_case]), self.ref[ind_case]) case_tmp = ProbabilityPairwiseMeasures( - self.pred[ind_case], self.ref[ind_case] + self.pred[ind_case][0], self.ref[ind_case][0] ) + list_sum.append(case_tmp.fp_thr(thresh)) fppi = np.mean(np.asarray(list_sum)) - else: + else: # Assuming images stacked over last dimension sum_per_image = np.sum( np.reshape(self.__fp_map_thr(thresh), [-1, self.ref.shape[-1]]), axis=0 ) @@ -231,6 +323,8 @@ def fppi_thr(self, thresh): def net_benefit_treated(self): """ Calculation of net benefit given a specified threshold + + :return: net benefit value """ if "benefit_proba" in self.dict_args.keys(): thresh = self.dict_args["benefit_proba"] @@ -277,6 +371,8 @@ def froc(self): Meindert Niemeijer, Keelin Murphy, Arnold Schilham, Alessandra Retico, Maria Evelina Fantacci, et al. 2010. Comparing and combining algorithms for computer-aided detection of pulmonary nodules in computed tomography scans: the ANODE09 study. Medical image analysis 14, 6 (2010), 707–722. 
+ + :return: FROC """ ( unique_thresh, @@ -285,16 +381,18 @@ def froc(self): list_ppv, list_fppi, ) = self.all_multi_threshold_values() + print(list_fppi, unique_thresh) array_fppi = np.asarray(list_fppi) array_sens = np.asarray(list_sens) max_fppi = np.max(array_fppi) added_fppi = np.asarray([1.0/8, 1.0/4, 1.0/2, 1, 2, 4, 8]) added_sens = np.ones([7])*array_sens[-1] if np.max(array_fppi) > 8: - ind = np.where(array_fppi>8) - min_ind = np.min(ind) + ind = np.where(array_fppi>8)[0][0] + # print(ind) array_sens_new = array_sens[:ind] array_fppi_new = array_fppi[:ind] + # print(array_fppi_new, array_sens_new) elif max_fppi < 1.0/8: array_fppi_new = np.concatenate([array_fppi, added_fppi]) array_sens_new = np.concatenate([array_sens, added_sens]) @@ -302,11 +400,13 @@ def froc(self): array_fppi_new = array_fppi array_sens_new = array_sens else: - ind = np.where(added_fppi < max_fppi) - added_fppi_fin = added_fppi[ind:] - added_sens_fin = added_sens[ind:] + ind = np.max(np.where(added_fppi < max_fppi)) + # print(ind) + added_fppi_fin = added_fppi[ind+1:] + added_sens_fin = added_sens[ind+1:] array_fppi_new = np.concatenate([array_fppi, added_fppi_fin]) array_sens_new = np.concatenate([array_sens, added_sens_fin]) + # print(array_fppi_new, array_sens_new) # diff_fppi = array_fppi[1:] - array_fppi[:-1] @@ -438,7 +538,7 @@ def sensitivity_at_fppi(self): if "value_fppi" in self.dict_args.keys(): value_fppi = self.dict_args["value_fppi"] else: - value_fppi = 0.8 + value_fppi = 2 ( unique_thresh, list_sens, @@ -509,6 +609,8 @@ def ppv_at_sensitivity(self): def to_dict_meas(self, fmt="{:.4f}"): """ Transforming the results to form a dictionary + + :return: result_dict """ result_dict = {} for key in self.measures: diff --git a/MetricsReloaded/processes/mixed_measures_processes.py b/MetricsReloaded/processes/mixed_measures_processes.py index 755295b..5f78201 100644 --- a/MetricsReloaded/processes/mixed_measures_processes.py +++ 
b/MetricsReloaded/processes/mixed_measures_processes.py @@ -15,8 +15,10 @@ ==================================================================================== This module provides classes for performing the evaluation processes of - :ref:`instance segmentation `, :ref:`multi label instance segmentation `, -:ref:`multilabel object detection ` and :ref:`multi class classification `. + :ref:`instance segmentation `, + :ref:`multi label instance segmentation `, + :ref:`multilabel object detection ` and + :ref:`multi class classification `. .. _instanceseg: @@ -49,6 +51,7 @@ .. autoclass:: MultiLabelPairwiseMeasures :members: + """ @@ -91,6 +94,7 @@ class MixedLocSegPairwiseMeasure(object): :param measures_pcc: list of choices of measures of per class counting in terms of classification of instances :param measures_detseg: consideration (list) of metrics combining both segmentation and detection performance :param dict_args: dictionary with relevant arguments for the metrics + """ def __init__( self, @@ -201,12 +205,12 @@ class MultiLabelLocSegPairwiseMeasure(object): This class represents the processing for instance segmentation on true positive Characterised by the predicted classes and associated reference classes - :param pred_class: list for each considered case of classes predicted - :param ref_class:list for each considered case of reference classes + :param pred_class: list for each considered case of classes predicted + :param ref_class: list for each considered case of reference classes :param pred_loc: list for each considered case of the individual image considering an individual predicted element :param ref_loc: list for each considered case of the individual images considering individual reference elements (note that ref_loc and ref_class entities are matching) - :param pred_prob: - :param list_values: list of possible label values + :param pred_prob: list for each considered case of the individual element predicted probabilities + :param list_values: 
list of possible label values :param measures_pcc: list of per class counting measures to be derived during the process :param measures_overlap: list of overlap (segmentation) measures to be derived during the process :param measures_boundary: list of boundary measures to be derived during the comparison process @@ -220,6 +224,7 @@ class MultiLabelLocSegPairwiseMeasure(object): :param thresh: :param flag_fp_in: flag to consider the false positive elements in the assessment :param dict_args: dictionary for additional arguments related to the chosen metrics. + """ def __init__( @@ -283,6 +288,7 @@ def __init__( def create_nifti_image(self, list_maps, file_ref, category): """ Creates a nifti image of either the true positives, true negatives, false positives or false negatives + :param list_maps: list of np.arrays containing an element to add to the final image :param file_ref: reference nifti file to use for saving the final image :param category: category description of the elements being saved (classically TP TN FP FN) @@ -304,11 +310,12 @@ def per_label_dict(self): According to the specifications of metrics to be used and the type of assignment and localization, performs, per label value in list_values the processing per case (overall prediction associated to overall reference image). This is organised in multiple steps: - - identification for each predicted case of the items considered as of the class specified by label considered - - identification for each associated case, the items considered as of the class specified by the considered label - - creation of the associated list of individual images of elements selected beforehand both in the prediction images and the reference images (the images are listed in the same order with one element per image) - - assigment procedure based on the segmentation images - - derivation of metrics either on a case by case basis or grouping all cases together. + + 1. 
identification for each predicted case of the items considered as of the class specified by label considered + 2. identification for each associated case, the items considered as of the class specified by the considered label + 3. creation of the associated list of individual images of elements selected beforehand both in the prediction images and the reference images (the images are listed in the same order with one element per image) + 4. assigment procedure based on the segmentation images + 5. derivation of metrics either on a case by case basis or grouping all cases together. """ list_det = [] @@ -325,19 +332,6 @@ def per_label_dict(self): pred_class_case = np.asarray(self.pred_class[case]) ref_class_case = np.asarray(self.ref_class[case]) ind_pred = np.where(pred_class_case == lab) - - # identification of the elements of pred classified according to label lab - pred_tmp = np.where( - pred_class_case == lab, - np.ones_like(pred_class_case), - np.zeros_like(pred_class_case), - ) - # identification of the elements of ref_class classificed according to label lab - ref_tmp = np.where( - ref_class_case == lab, - np.ones_like(ref_class_case), - np.zeros_like(ref_class_case), - ) ind_ref = np.where(ref_class_case == lab) # Creation of the list of individual element images for pred and ref given the chosen label @@ -495,8 +489,9 @@ def per_label_dict(self): class MultiLabelLocMeasures(object): """ Class for the processing of multilabel object detection processes + :param pred_class: list for each considered case of classes predicted - :param ref_class:list for each considered case of reference classes + :param ref_class: list for each considered case of reference classes :param pred_loc: list for each considered case of the individual image considering an individual predicted element :param ref_loc: list for each considered case of the individual images considering individual reference elements (note that ref_loc and ref_class entities are matching) :param pred_prob: 
@@ -553,6 +548,11 @@ def __init__( self.flag_valid_proba=False def per_label_dict(self): + """ + Process allowing for the creation of dictionaries with the resulting measures for per label detection metrics and multi threshold metrics + + :return: dict_det, dict_mt + """ list_det = [] list_mt = [] for lab in self.list_values: @@ -563,12 +563,6 @@ def per_label_dict(self): pred_arr = np.asarray(self.pred_class[case]) ref_arr = np.asarray(self.ref_class[case]) ind_pred = np.where(pred_arr == lab) - pred_tmp = np.where( - pred_arr == lab, np.ones_like(pred_arr), np.zeros_like(pred_arr) - ) - ref_tmp = np.where( - ref_arr == lab, np.ones_like(ref_arr), np.zeros_like(ref_arr) - ) ind_ref = np.where(ref_arr == lab) pred_loc_tmp = [self.pred_loc[case][f] for f in ind_pred[0]] ref_loc_tmp = [self.ref_loc[case][f] for f in ind_ref[0]] @@ -657,22 +651,24 @@ def per_label_dict(self): class MultiLabelPairwiseMeasures(object): """ Semantic segmentation or Image wide classification with possibility of multiple labels - :param pred: - :param ref: - :param pred_proba: - :param list_values: - :param names: - :param measures_pcc: - :param measures_mt: - :param measures_mcc: - :param measures_overlap: - :param measures_boundary: - :param measures_calibration: - :param connectivity_type: - :param per_case: - :param pixdim: + + :param pred: Prediction data + :param ref: Reference data + :param pred_proba: Prediction probability data + :param list_values: list of label values + :param names: list of names of cases/files + :param measures_pcc: list of per class counting metrics + :param measures_mt: list of multi threshold probability metrics + :param measures_mcc: list of multi class counting metrics + :param measures_overlap: list of overlap metrics + :param measures_boundary: list of boundary metrics + :param measures_calibration: list of calibration metrics + :param connectivity_type: connectivity type -default 1 + :param per_case: flag indicating whether metrics should be calculated 
overall or per case + :param pixdim: pixel dimension :param empty: - :param dict_args: + :param dict_args: dictionary of specific arguments to be used in the metrics + """ def __init__( self, @@ -724,6 +720,9 @@ def __init__( self.flag_valid_proba = False def squeeze_ref_and_pred_to_size(self): + """ + Utility function to check for number of dimension in each prediction and reference case and squeeze added dimensions if possible + """ for i,(p,r) in enumerate(zip(self.pred, self.ref)): if np.size(np.asarray(p)) == np.size(np.asarray(r)) and np.asarray(p).ndim != np.asarray(r).ndim: warnings.warn("There is a dimensional mismatch between pred and ref despite same size") @@ -734,6 +733,11 @@ def squeeze_ref_and_pred_to_size(self): return def per_label_dict(self): + """ + Process allowing for the creation of dictionaries with the resulting measures for per label binary pairwise metrics and multi threshold metrics + + :return: dict_bin, dict_mt + """ list_bin = [] list_mt = [] for lab in self.list_values: @@ -843,6 +847,11 @@ def per_label_dict(self): return pd.DataFrame.from_dict(list_bin), pd.DataFrame.from_dict(list_mt) def multi_label_res(self): + """ + Creation of the multilabel results with multilabel counting metrics and calibration metrics returned as separate dictionaries + + :return: pd_mcc, pd_cal + """ list_pred = [] list_ref = [] list_prob = [] diff --git a/MetricsReloaded/processes/overall_process.py b/MetricsReloaded/processes/overall_process.py index 0e4a472..cfba58a 100644 --- a/MetricsReloaded/processes/overall_process.py +++ b/MetricsReloaded/processes/overall_process.py @@ -181,8 +181,7 @@ """ -from MetricsReloaded.metrics.pairwise_measures import BinaryPairwiseMeasures -from MetricsReloaded.processes.mixed_measures_processes import * +from MetricsReloaded.processes.mixed_measures_processes import MultiLabelLocMeasures, MultiLabelPairwiseMeasures, MultiLabelLocSegPairwiseMeasure import warnings from MetricsReloaded.utility.utils import combine_df,
merge_list_df import pandas as pd @@ -295,7 +294,7 @@ class ProcessEvaluation(object): """ Performs the evaluation of the data stored in a pickled file according to all the measures, categories and choices of processing - :param data: dictionary containing all the data to be used for the comparison; possible keys include "pred_loc", "ref_loc", "pred_prob", + :param data: dictionary containing all the data to be used for the comparison; possible keys include "pred_loc", "ref_loc", "pred_prob", "ref_missing_pred" :param category: task to be considered choice among ImLC, ObD, SemS, InS :param measures_pcc: list of per class counting measures (these need to be adequate for the chosen task category) :param measures_mcc: list of multi class counting measures @@ -354,6 +353,7 @@ def __init__( self.flag_fp_in = flag_fp_in self.flag_ignore_missing = ignore_missing self.flag_valid = self.check_valid_measures_cat() + self.list_empty_ref = self.identify_empty_ref() self.pixdim = pixdim if self.flag_valid: self.process_data() @@ -551,10 +551,38 @@ def create_mapping_column_nan_replaced_seg(self): def identify_empty_ref(self): - return + """ + Identify empty reference elements in the list of cases to evaluate in all categories except image classification. 
This is stored as a list of boolean flagging empty cases as True + + :return: list_empty list of boolean indicating whether reference is empty or not + """ + list_empty = [] + if self.category == 'ImLC': + warnings.warn("No need to identify empty reference with image level classification - only suitable for instance segmentation, object detection and image segmentation") + return list_empty + elif self.category in ["ObD", "InS"]: + + for ref_case in self.data['ref_class']: + flag_empty = False + if len(ref_case) == 0: + flag_empty = True + list_empty.append(flag_empty) + return list_empty + else: + for ref_case in self.data['ref_class']: + flag_empty = False + if np.sum(ref_case) == 0: + flag_empty = True + list_empty.append(flag_empty) + return list_empty def complete_missing_cases(self): - if len(self.data['ref_missing']) == 0: + """ + + For all cases with missing predictions, complete according to the options set up - ignoring them or replacing with worse value + + """ + if len(self.data['ref_missing_pred']) == 0: return if self.flag_ignore_missing: warnings.warn("The set up currently ignores any missing case / dataset") @@ -566,7 +594,7 @@ def complete_missing_cases(self): list_missing_mcc = [] numb_valid = len(self.data['ref_class']) if self.case: - for (i,f) in enumerate(self.data['ref_missing']): + for (i,f) in enumerate(self.data['ref_missing_pred']): dict_mt = {} dict_mcc = {} dict_seg = {} @@ -575,7 +603,7 @@ def complete_missing_cases(self): for m in self.measures_mcc: dict_mcc[m] = WORSE[m] list_missing_mcc.append(dict_mcc) - for l in self.data['list_values']: + for lab in self.data['list_values']: dict_seg = {} dict_mt = {} dict_det = {} @@ -592,26 +620,32 @@ def complete_missing_cases(self): dict_seg[m] = WORSE[m] if len(self.measures_boundary) + len(self.measures_overlap) > 0: dict_seg['case'] = i + numb_valid - dict_seg["label"] = l + dict_seg["label"] = lab list_missing_seg.append(dict_seg) if len(self.measures_pcc) + len(self.measures_detseg) > 0 
: dict_det['case'] = i + numb_valid - dict_det["label"] = l + dict_det["label"] = lab list_missing_det.append(dict_det) if len(self.measures_mt) > 0: dict_mt['case'] = i + numb_valid - dict_mt["label"] = l + dict_mt["label"] = lab list_missing_mt.append(dict_mt) - df_miss_det = pd.DataFrame.from_dict(list_missing_det) - df_miss_seg = pd.DataFrame.from_dict(list_missing_seg) - df_miss_mcc = pd.DataFrame.from_dict(list_missing_mcc) - df_miss_mt = pd.DataFrame.from_dict(list_missing_mt) - self.resdet = combine_df(self.resdet, df_miss_det) - self.resseg = combine_df(self.resseg, df_miss_seg) - self.resmt = combine_df(self.resmt, df_miss_mt) - self.resmcc = combine_df(self.resmcc, df_miss_mcc) + df_miss_det = pd.DataFrame.from_dict(list_missing_det) + df_miss_seg = pd.DataFrame.from_dict(list_missing_seg) + df_miss_mcc = pd.DataFrame.from_dict(list_missing_mcc) + df_miss_mt = pd.DataFrame.from_dict(list_missing_mt) + self.resdet = combine_df(self.resdet, df_miss_det) + self.resseg = combine_df(self.resseg, df_miss_seg) + self.resmt = combine_df(self.resmt, df_miss_mt) + self.resmcc = combine_df(self.resmcc, df_miss_mcc) + return def label_aggregation(self, option='average',dict_args={}): + """ + Performs the aggregation of the results across labels according to different aggregation strategies + + :return: df_grouped_all dataframe with the results aggregated labels + """ if len(self.data['list_values']) == 1: # print('DET', self.resdet,'CAL',self.rescal, 'SEG',self.resseg,'MT', self.resmt,'MCC', self.resmcc) df_grouped_all = merge_list_df([self.resdet, self.resseg, self.resmt,self.resmcc, self.rescal]) @@ -631,8 +665,7 @@ def label_aggregation(self, option='average',dict_args={}): list_measures = self.measures_boundary + self.measures_overlap + self.measures_detseg + self.measures_pcc + self.measures_mt dict_measures = {k:[('prevalence',wm),('weights',wm2),('average',wm3)] for k in list_measures} df_grouped_lab = 
df_all_labels.groupby('case',as_index=False).agg(dict_measures).reset_index() - df_grouped_lab.columns = ['_'.join(col).rstrip('_') for col in df_grouped_lab.columns.values -] + df_grouped_lab.columns = ['_'.join(col).rstrip('_') for col in df_grouped_lab.columns.values] # print(df_grouped_lab, " grouped lab ") df_grouped_all = merge_list_df([df_grouped_lab.reset_index(), self.resmcc, self.rescal], on=['case']) @@ -640,6 +673,9 @@ def label_aggregation(self, option='average',dict_args={}): return df_grouped_all def get_stats_res(self): + """ + Create summary statistics overall and per label available in self.stats_lab and self.stats_all + """ df_stats_all = self.grouped_lab.describe() df_all_labels = merge_list_df([self.resdet, self.resseg, self.resmt], on=['label','case']) df_stats_lab = df_all_labels.groupby('label').describe() diff --git a/MetricsReloaded/utility/assignment_localization.py b/MetricsReloaded/utility/assignment_localization.py index b120f12..afee869 100644 --- a/MetricsReloaded/utility/assignment_localization.py +++ b/MetricsReloaded/utility/assignment_localization.py @@ -34,9 +34,6 @@ import warnings from MetricsReloaded.metrics.pairwise_measures import BinaryPairwiseMeasures from MetricsReloaded.utility.utils import ( - intersection_boxes, - area_box, - union_boxes, box_ior, box_iou, guess_input_style, @@ -55,34 +52,40 @@ class AssignmentMapping(object): """ Class allowing the assignment and localization of individual objects of interests. 
- The localization strategies are either based on box characteristics: - - box_iou - - box_ior - - box_com - or on the masks - - mask_iou - - mask_ior - - mask_com - - boundary_iou - or using only centre of mass - - com_dist - or a mix of mask and box - - point_in_box - or of point and mask - - point_in_mask + The localization strategies are either based on + + * box characteristics: + * box_iou + * box_ior + * box_com + * on the masks + * mask_iou + * mask_ior + * mask_com + * boundary_iou + * using only centre of mass + * com_dist + * a mix of mask and box + * point_in_box + * of point and mask + * point_in_mask + where iou refers to Intersection over Union, IoR to Intersection over Reference, and CoM to Centre of Mass Options to solve assignment ambiguities are one of the following: - - hungarian: minimising assignment cost - - greedy_matching: based on best matching - - greedy_performance: based on probability score + + * hungarian: minimising assignment cost + * greedy_matching: based on best matching + * greedy_performance: based on probability score + flag_fp_in indicates whether or not to consider the double detection of reference objects as false positives or not - :param pred_loc: - :param ref_loc: - :param pred_prob: - :param localization: - :param assignment: - :param pixdim: - :param flag_fp_in: + + :param pred_loc: list of location for each predicted element + :param ref_loc: list of locations for each reference element + :param pred_prob: list of predicted probabilities + :param localization: chosen localisation method - default box_iou + :param assignment: chosen assignment method - default greedy matching + :param pixdim: pixel dimensions + :param flag_fp_in: indicator flag to specify whether the false positives should be considered """ @@ -107,6 +110,8 @@ def __init__( self.flag_fp_in = flag_fp_in self.pixdim = pixdim all_input = [] + self.ref_loc_mod = None + self.pred_loc_mod = None if len(self.pixdim) == 0: if len(pred_loc) > 0: if 
pred_loc[0].size > 0: @@ -133,11 +138,14 @@ def __init__( flag_usable, flag_predmod, flag_refmod = self.check_input_localization() # self.pred_class = pred_class - + print('Flag', flag_usable, flag_predmod, flag_refmod) # self.ref_class = ref_class self.flag_usable = flag_usable self.flag_predmod = flag_predmod self.flag_refmod = flag_refmod + + + if self.flag_usable: if localization == "box_iou": self.matrix = self.pairwise_boxiou() @@ -160,18 +168,35 @@ def __init__( elif localization == "com_dist": self.matrix = self.pairwise_pointcomdist() else: + print(' not valid localisation ') + self.flag_usable = False + self.df_matching = None + self.valid = None + warnings.warn("No adequate localization strategy chosen - not going ahead") + else: + print(' not valid localisation ') self.flag_usable = False + self.df_matching = None + self.valid = None warnings.warn("No adequate localization strategy chosen - not going ahead") if self.localization in ['point_in_mask','point_in_box']: if self.assignment == 'greedy_matching': self.flag_usable = False + self.df_matching = None + self.valid = None warnings.warn("The localization strategy does not provide grading. 
Impossible to base assignment on localization performance!") if self.flag_usable: self.df_matching, self.valid = self.resolve_ambiguities_matching() def check_input_localization(self): + """ + Checks whether the provided input to the localisation is usable and if possible reprocess to provide the correct input + + :return: flag_usable, flag_predmod, flag_refmod indicating whether the data is usable and whether prediction and or reference needed to be modified to confirm to the original choices + + """ flag_refmod = False flag_predmod = False flag_usable = True @@ -231,6 +256,7 @@ def check_input_localization(self): return flag_usable, flag_predmod, flag_refmod if input_ref == 'mask': flag_refmod = True + self.box_fromrefmask() warnings.warn('We will need to modify ref to make it interpretable as box corners') elif self.localization == 'com_dist': if input_ref == 'mask': @@ -256,30 +282,55 @@ def check_input_localization(self): return flag_usable, flag_predmod, flag_refmod def com_frompredbox(self): + """ + Loop through the list of box elements from self.pred_loc and identify the centre of mass of the different objects. + Assign the list of centres of mass to self.pred_loc_mod + """ + list_mod = [] for f in range(self.pred_loc.shape[0]): list_mod.append(com_from_box(self.pred_loc[f,...])) self.pred_loc_mod = np.vstack(list_mod) def com_fromrefbox(self): + """ + Loop through the list of reference box elements from self.ref_loc and identify the centre of mass of the different objects. + Assign the list of centres of mass to self.ref_loc_mod + """ + list_mod = [] for f in range(self.ref_loc.shape[0]): list_mod.append(com_from_box(self.ref_loc[f,...])) self.ref_loc_mod = np.vstack(list_mod) def com_frompredmask(self): + """ + Loop through the list of predicted mask elements from self.pred_loc and identify the centre of mass of the different objects. 
+ Assign the list of centres of mass to self.pred_loc_mod + """ + list_mod = [] for f in range(self.pred_loc.shape[0]): list_mod.append(compute_center_of_mass(self.pred_loc[f,...])) self.pred_loc_mod = np.vstack(list_mod) def com_fromrefmask(self): + """ + Loop through the list of reference masks elements from self.ref_loc and identify the centre of mass of the different objects. + Assign the list of centres of mass to self.ref_loc_mod + """ + list_mod = [] for f in range(self.ref_loc.shape[0]): list_mod.append(compute_center_of_mass(self.ref_loc[f,...])) self.ref_loc_mod = np.vstack(list_mod) def box_fromrefmask(self): + """ + Loop through the list of reference masks elements from self.ref_loc and identify the bounding box of the different objects. + Assign the list of boxes of mass to self.ref_loc_mod + """ + list_mod = [] for f in range(self.ref_loc.shape[0]): list_mod.append(compute_box(self.ref_loc[f,...])) @@ -287,6 +338,11 @@ def box_fromrefmask(self): self.ref_loc_mod = np.vstack(list_mod) def box_frompredmask(self): + """ + Loop through the list of predicted masks elements from self.pred_loc and identify the bounding box of the different objects. 
+ Assign the list of boxes of mass to self.pred_loc_mod + """ + list_mod = [] for f in range(self.pred_loc.shape[0]): list_mod.append(compute_box(self.pred_loc[f,...])) @@ -296,6 +352,8 @@ def pairwise_pointcomdist(self): """ Creates a matrix of size numb_prediction elements x number of reference elements indicating the pairwise distance of the centre of mass of the location boxes + + :return: matrix_cdist - matrix of pairwise distance between the centres of mass of the predicted elements and the reference elements """ pred_coms = self.pred_loc ref_coms = self.ref_loc @@ -311,13 +369,15 @@ def pairwise_pointinbox(self): """ Creates a matrix of size number of prediction elements x number of reference elements indicating binarily whether the point representing the prediction element is in the reference box + + :return: matrix_pinb matrix of binary indicating whether a predicted element point is in a reference element box """ ref_boxes = self.ref_loc pred_points = self.pred_loc if self.flag_refmod: ref_boxes = self.ref_loc_mod - if self.flag_predmod: - pred_points = self.pred_loc_mod + # if self.flag_predmod: + # pred_points = self.pred_loc_mod matrix_pinb = np.zeros([pred_points.shape[0],ref_boxes.shape[0]]) for (p, p_point) in enumerate(pred_points): for (r, r_box) in enumerate(ref_boxes): @@ -328,13 +388,15 @@ def pairwise_pointinmask(self): """ Creates a matrix of size number of prediction elements x number of reference elements indicating binarily whether the point representing the prediction element is in the reference mask + + :return: matrix_pinm matrix indicating whether a point in the predicted element is in the mask of reference element in a pairwise manner across all elements of the predicted and reference elements """ ref_masks = self.ref_loc pred_points = self.pred_loc - if self.flag_refmod: - ref_masks = self.ref_loc_mod - if self.flag_predmod: - pred_points = self.pred_loc_mod + # if self.flag_refmod: + # ref_masks = self.ref_loc_mod + # if 
self.flag_predmod: + # pred_points = self.pred_loc_mod matrix_pinm = np.zeros([pred_points.shape[0],ref_masks.shape[0]]) for (p,p_point) in enumerate(pred_points): for (r,r_mask) in enumerate(ref_masks): @@ -346,6 +408,8 @@ def pairwise_boxiou(self): """ Creates a matrix of size number of prediction elements x number of reference elements indicating the pairwise box iou + + :return: matrix_iou """ ref_box = self.ref_loc pred_box = self.pred_loc @@ -364,6 +428,8 @@ def pairwise_maskior(self): """ Creates a matrix of size number of prediction elements x number of reference elements indicating the pairwise mask ior + + :return: matrix_ior """ matrix_ior = np.zeros([self.pred_loc.shape[0], self.ref_loc.shape[0]]) for p in range(self.pred_loc.shape[0]): @@ -376,6 +442,8 @@ def pairwise_boundaryiou(self): """ Creates a matrix of size number of prediction elements x number of reference elements indicating the pairwise boundary iou + + :return: matrix_biou """ matrix_biou = np.zeros([self.pred_loc.shape[0],self.ref_loc.shape[0]]) for p in range(self.pred_loc.shape[0]): @@ -388,6 +456,8 @@ def pairwise_maskcom(self): """ Creates a matrix of size number of prediction elements x number of reference elements indicating the pairwise distance between mask centre of mass + + :return: matrix_com """ matrix_com = np.zeros([self.pred_loc.shape[0], self.ref_loc.shape[0]]) for p in range(self.pred_loc.shape[0]): @@ -400,6 +470,8 @@ def pairwise_maskiou(self): """ Creates a matrix of size number of prediction elements x number of reference elements indicating the pairwise mask iou. 
+ + :return: matrix_iou """ matrix_iou = np.zeros([self.pred_loc.shape[0], self.ref_loc.shape[0]]) for p in range(self.pred_loc.shape[0]): @@ -412,6 +484,8 @@ def pairwise_boxior(self): """ Creates a matrix of size number of prediction elements x number of reference elements indicating the pairwise box ior + + :return: matrix_ior """ ref_boxes = self.ref_loc pred_boxes = self.pred_loc @@ -430,7 +504,16 @@ def initial_mapping(self): Identifies an original ideal mapping between references and prediction element for all those when there is no ambiguity in the assignment (only one to one matching available). Creates the list of possible options when multiple are possible and populates the relevant dataframes with performance of the - localization metrics and the assigned score probability. + localization metrics and the assigned score probability. The dataframes are as follows: + + * df_matching: where the reference matches for each prediction are indicated + * df_fn: the references elements for which there is no prediction possibly matching + * df_fp: the predicted elements for which there is no reference possibly matching + + list_valid is the list of indices of prediction with a matching reference + + + :return: df_matching, df_fn, df_fp, list_valid """ matrix = self.matrix if self.localization in ['mask_com', 'box_com','com_dist']: @@ -534,13 +617,13 @@ def resolve_ambiguities_matching(self): df_ambiguous_ref = df_matching[ (df_matching["count_ref"] > 1) & (df_matching["ref"] > -1) ] - df_ambiguous_seg = df_matching[ + df_ambiguous_pred = df_matching[ (df_matching["count_pred"] > 1) & (df_matching["pred"] > -1) ] if ( df_ambiguous_ref is None or df_ambiguous_ref.shape[0] == 0 - and df_ambiguous_seg.shape[0] == 0 + and df_ambiguous_pred.shape[0] == 0 ): print("No ambiguity in matching") df_matching_all = pd.concat([df_matching, df_fp, df_fn]) @@ -554,7 +637,7 @@ def resolve_ambiguities_matching(self): list_matching = [] for (r, c) in zip(row, col): df_tmp = 
df_matching[ - df_matching["seg"] == list_valid[r] & (df_matching["ref"] == c) + df_matching["pred"] == list_valid[r] & (df_matching["ref"] == c) ] list_matching.append(df_tmp) df_ordered2 = pd.concat(list_matching) diff --git a/MetricsReloaded/utility/utils.py b/MetricsReloaded/utility/utils.py index b4d4984..2d33a2b 100644 --- a/MetricsReloaded/utility/utils.py +++ b/MetricsReloaded/utility/utils.py @@ -56,10 +56,6 @@ 'min_x_at_y_less', 'min_x_at_y_more', 'one_hot_encode', - 'to_string_count', - 'to_string_dist', - 'to_string_mt', - 'to_dict_meas_', 'trapezoidal_integration', ] @@ -112,25 +108,34 @@ def border_map(self): border = self.binary_map - eroded return border - def border_map2(self): - """ - Creates the border for a 3D image - :return: - """ - west = ndimage.shift(self.binary_map, [-1, 0, 0], order=0) - east = ndimage.shift(self.binary_map, [1, 0, 0], order=0) - north = ndimage.shift(self.binary_map, [0, 1, 0], order=0) - south = ndimage.shift(self.binary_map, [0, -1, 0], order=0) - top = ndimage.shift(self.binary_map, [0, 0, 1], order=0) - bottom = ndimage.shift(self.binary_map, [0, 0, -1], order=0) - cumulative = west + east + north + south + top + bottom - border = ((cumulative < 6) * self.binary_map) == 1 - return border + # def border_map2(self): + # """ + # Creates the border for a 3D image + # :return: + # """ + # west = ndimage.shift(self.binary_map, [-1, 0, 0], order=0) + # east = ndimage.shift(self.binary_map, [1, 0, 0], order=0) + # north = ndimage.shift(self.binary_map, [0, 1, 0], order=0) + # south = ndimage.shift(self.binary_map, [0, -1, 0], order=0) + # top = ndimage.shift(self.binary_map, [0, 0, 1], order=0) + # bottom = ndimage.shift(self.binary_map, [0, 0, -1], order=0) + # cumulative = west + east + north + south + top + bottom + # border = ((cumulative < 6) * self.binary_map) == 1 + # return border def foreground_component(self): + """ + Create the connected component map from the binary map stored in self.binary_map + + return: label 
map and number of labels + """ return ndimage.label(self.binary_map) def list_foreground_component(self): + """ + For each connected component from the binary map, create and return as lists per element, + the list of corresponding indices, the list of volumes and the list of centres of mass + """ labels, _ = self.foreground_component() list_ind_lab = [] list_volumes = [] @@ -150,15 +155,14 @@ def intersection_boxes(box1, box2): """ Intersection area/volume between two boxes given their extreme corners - :param: box1 - first box to consider for intersection - :param: box2 - second box to consider for intersection + :param box1: first box to consider for intersection + :param box2: second box to consider for intersection :return: intersection -value of the intersected volume / area as number of pixels / voxels """ min_values = np.minimum(box1, box2) max_values = np.maximum(box1, box2) box_inter = max_values[: min_values.shape[0] // 2] box_inter2 = min_values[max_values.shape[0] // 2 :] - box_intersect = np.concatenate([box_inter, box_inter2]) box_intersect_area = np.prod( np.maximum(box_inter2 + 1 - box_inter, np.zeros_like(box_inter)) ) @@ -169,7 +173,7 @@ def guess_input_style(a): """ Given an array a, guess whether it represents a mask, a box or a centre of mass - :param: a - input array to check + :param a: input array to check :return: string from either mask, box or com """ @@ -185,7 +189,7 @@ def com_from_box(box): """ Identifies the centre of mass of a box from its extreme coordinates - :param: box: box identified as a vector of size 2xndim with first the ndim minimum values and then the ndim maximum values + :param box: box identified as a vector of size 2xndim with first the ndim minimum values and then the ndim maximum values :return: Centre of mass of the box as a vector of size ndim """ min_corner = box[:box.shape[0]//2] @@ -198,8 +202,8 @@ def point_in_box(point, box): """ Indicates whether a point is contained in an axis-aligned box specified by min 
and maximum corners - :param: point: coordinates of the point to assess - :param: box: vector of size 2 x ndim (2 or 3), the first ndim values corresponding to the minimum corner and the last ndim to the maximum corner + :param point: coordinates of the point to assess + :param box: vector of size 2 x ndim (2 or 3), the first ndim values corresponding to the minimum corner and the last ndim to the maximum corner :return: 1 if the point is in the box 0 otherwise """ min_corner = box[:box.shape[0]//2] @@ -217,8 +221,8 @@ def point_in_mask(point, mask): """ Indicates whether a point (given by coordinates 2D or 3D) is in a mask - :param: point - coordinates of the point to check (list or np-array) - :param: mask - nd array for a segmentation mask + :param point: coordinates of the point to check (list or np-array) + :param mask: nd array for a segmentation mask :return: 1 if the point is in the mask, 0 otherwise """ new_mask = np.zeros_like(mask) @@ -236,8 +240,7 @@ def area_box(box1): """ Determines the area / volume given the coordinates of extreme corners - :param: box extreme corners specified as :math:`x_{min},y_{min},x_{max},y_{max}` or - :math:`x_{min},y_{min},z_{min},x_{max},y_{max},z_{max}` + :param box1: box extreme corners specified as :math:`x_{min},y_{min},x_{max},y_{max}` or :math:`x_{min},y_{min},z_{min},x_{max},y_{max},z_{max}` :return: area/volume of the box (in pixels/voxels) """ box_corner1 = box1[: box1.shape[0] // 2] @@ -249,7 +252,8 @@ def union_boxes(box1, box2): """ Calculates the union of two boxes given their corner coordinates - :param: box1 and box2 specified as for area_box + :param box1:first box considered for union of boxes + :param box2: second box specified for area_box :return: union of two boxes in number of pixels """ value = area_box(box1) + area_box(box2) - intersection_boxes(box1, box2) @@ -260,7 +264,8 @@ def box_iou(box1, box2): """ Calculates the iou of two boxes given their extreme corners coordinates - :param: box1, box2 + 
:param box1: first box to consider in the iou calculation + :param box2: second box with which to calculate iou :return: intersection over union of the two boxes """ numerator = intersection_boxes(box1, box2) @@ -272,12 +277,21 @@ def box_ior(box1, box2): """ Calculates the intersection over reference between two boxes (reference box being the second one) + :param box1: first box to consider in the ior calculation - counting as prediction box + :param box2: second box with which to calculate ior - counting as reference box + :return: intersection over reference of the two boxes """ numerator = intersection_boxes(box1, box2) denominator = area_box(box2) return numerator / denominator def median_heuristic(matrix_proba): + """ + From a matrix of probabilities return the median of the pairwise distance + + :param matrix_proba: matrix of probabilities + :return: median_heuristic + """ pairwise_dist = squareform(pdist(matrix_proba)) median_heuristic = np.median(pairwise_dist) return median_heuristic @@ -288,7 +302,7 @@ def compute_skeleton(img): """ Computes skeleton using skimage.morphology.skeletonize - :param: img - array with the binary mask of the element to skeletonise + :param img: array with the binary mask of the element to skeletonise :return: nd array with the mask of the skeleton of the element considered in img """ return skeletonize(img) @@ -297,7 +311,7 @@ def compute_box(img): """ Computes the coordinates of the bounding box based on a mask (in img) - :param: img: mask of the element for which to compute bounding box + :param img: mask of the element for which to compute bounding box :return: indices of the bottom left and top right corners of the bounding box axis aligned. 
""" indices = np.asarray(np.where(img>0)).T @@ -311,14 +325,20 @@ def compute_center_of_mass(img): """ Computes center of mass using scipy.ndimage - :param: img as multidimensional array + :param img: a multidimensional array :return: Returns the centre """ return ndimage.center_of_mass(img) def distance_transform_edt(img, sampling=None): - """Computes Euclidean distance transform using ndimage + """ + Computes Euclidean distance transform using ndimage + + :param img: mask from which to calculate distance + :param sampling: sampling to use (None as default considering isotropic 1 voxels) + :return: distance map + """ return ndimage.distance_transform_edt( img, sampling=sampling, return_indices=False @@ -328,10 +348,10 @@ def max_x_at_y_more(x, y, cut_off): """Gets max of elements in x where elements in y are geq to a cut off value - used in the metrics based on probability thresholds - :param: x: array of values - :param: y: array of values similar length to x - :param: cutoff - value at which to consider the cut-offon y - :param + :param x: array of values + :param y: array of values similar length to x + :param cut_off: value at which to consider the cut-off on y + :return: return the maximum of x for all corresponding values of y greater than or equal to the cut-off """ x = np.asarray(x) @@ -343,11 +363,11 @@ def max_x_at_y_less(x, y, cut_off): """Gets max of elements in x where elements in y are leq to a cut off value - :param: x: array of values - :param: y: array of values similar length to x - :param: cutoff - value at which to consider the cut-offon y - :param - :return: return the maximum of x for all corresponding values of y less than the cut-off + :param x: array of values + :param y: array of values similar length to x + :param cut_off: value at which to consider the cut-off on y + + :return: the maximum of x for all corresponding values of y less than the cut-off """ x = np.asarray(x) y = np.asarray(y) @@ -358,11 +378,11 @@ def min_x_at_y_less(x, y,
cut_off): """Gets min of elements in x where elements in y are leq to a cut off value - :param: x: array of values - :param: y: array of values similar length to x - :param: cutoff - value at which to consider the cut-offon y - :param - :return: return the maximum of x for all corresponding values of y less than the cut-off + :param x: array of values + :param y: array of values similar length to x + :param cut_off: value at which to consider the cut-offon y + + :return: the maximum of x for all corresponding values of y less than the cut-off """ x = np.asarray(x) y = np.asarray(y) @@ -373,9 +393,9 @@ def min_x_at_y_more(x,y,cut_off): """Gets min of elements in x where elements in y are greater than cutoff value - :param: x, vector of values - :param: y, vector of values same size of x - :param: cutoff cutoff value for y + :param x: vector of values + :param y: vector of values same size of x + :param cut_off: cutoff value for y :return: min of x where y >= cut_off """ x = np.asarray(x) @@ -386,100 +406,116 @@ def min_x_at_y_more(x,y,cut_off): def one_hot_encode(img, n_classes): """One-hot encodes categorical image - :param: img: labelled nd-array to encode - :param: n_classes: number of classes to consider when encoding - this is specified to avoid "forgetting one class" + :param img: labelled nd-array to encode + :param n_classes: number of classes to consider when encoding - this is specified to avoid "forgetting one class" :return: one hot encoded version of the input labelled image given the number of classes specified """ return np.eye(n_classes)[img] -def to_string_count(measures_count, counting_dict, fmt="{:.4f}"): - result_str = "" - # list_space = ['com_ref', 'com_pred', 'list_labels'] - for key in measures_count: - if len(counting_dict[key]) == 2: - result = counting_dict[key][0]() - else: - result = counting_dict[key][0](counting_dict[key][2]) - result_str += ( - ",".join(fmt.format(x) for x in result) - if isinstance(result, tuple) - else 
fmt.format(result) - ) - result_str += "," - return result_str[:-1] # trim the last comma - - -def to_string_dist(measures_dist, distance_dict, fmt="{:.4f}"): - """ - Transform to a comma separated string the content of results from the dictionary with all the distance based metrics - - :param: measures_dist: list of distance metrics - :param: distance_dict: dictionary with the results of the distance metrics - :param: fmt: format in which the outputs should be written (default 4 decimal points) - :return: complete comma-separated string of results in the order of keys specifid by measures_dist - """ - result_str = "" - # list_space = ['com_ref', 'com_pred', 'list_labels'] - for key in measures_dist: - if len(distance_dict[key]) == 2: - result = distance_dict[key][0]() - else: - result = distance_dict[key][0](distance_dict[key][2]) - result_str += ( - ",".join(fmt.format(x) for x in result) - if isinstance(result, tuple) - else fmt.format(result) - ) - result_str += "," - return result_str[:-1] # trim the last comma - - -def to_string_mt(measures_mthresh, multi_thresholds_dict, fmt="{:.4f}"): - """ - Transform to a comma separated string the content of results from the dictionary with all the multi-threshold metric - - :param: measures_mthresh: list of multi threshold metrics - :param: multi_thresholds_dict: dictionary with the results of the multi-threshold metrics - :param: fmt: format in which the outputs should be written (default 4 decimal points) - :return: complete comma-separated string of results in the order of keys specifid by measures_mthresh - """ - result_str = "" - # list_space = ['com_ref', 'com_pred', 'list_labels'] - for key in measures_mthresh: - if len(multi_thresholds_dict[key]) == 2: - result = multi_thresholds_dict[key][0]() - else: - result = multi_thresholds_dict[key][0]( - multi_thresholds_dict[key][2] - ) - result_str += ( - ",".join(fmt.format(x) for x in result) - if isinstance(result, tuple) - else fmt.format(result) - ) - result_str 
+= "," - return result_str[:-1] # trim the last comma +# def to_string_count(measures_count, counting_dict, fmt="{:.4f}"): +# """ +# Transform to a comma separated string the content of results from the dictionary with all the counting based metrics + +# :param measures_count: list of counting metrics +# :param counting_dict: dictionary with the results of the counting metrics +# :param fmt: format in which the outputs should be written (default 4 decimal points) +# :return: complete comma-separated string of results in the order of keys specifid by measures_dist +# """ +# result_str = "" +# # list_space = ['com_ref', 'com_pred', 'list_labels'] +# for key in measures_count: +# if len(counting_dict[key]) == 2: +# result = counting_dict[key][0]() +# else: +# result = counting_dict[key][0](counting_dict[key][2]) +# result_str += ( +# ",".join(fmt.format(x) for x in result) +# if isinstance(result, tuple) +# else fmt.format(result) +# ) +# result_str += "," +# return result_str[:-1] # trim the last comma + + +# def to_string_dist(measures_dist, distance_dict, fmt="{:.4f}"): +# """ +# Transform to a comma separated string the content of results from the dictionary with all the distance based metrics + +# :param measures_dist: list of distance metrics +# :param distance_dict: dictionary with the results of the distance metrics +# :param fmt: format in which the outputs should be written (default 4 decimal points) +# :return: complete comma-separated string of results in the order of keys specifid by measures_dist +# """ +# result_str = "" +# # list_space = ['com_ref', 'com_pred', 'list_labels'] +# for key in measures_dist: +# if len(distance_dict[key]) == 2: +# result = distance_dict[key][0]() +# else: +# result = distance_dict[key][0](distance_dict[key][2]) +# result_str += ( +# ",".join(fmt.format(x) for x in result) +# if isinstance(result, tuple) +# else fmt.format(result) +# ) +# result_str += "," +# return result_str[:-1] # trim the last comma + + +# def 
to_string_mt(measures_mthresh, multi_thresholds_dict, fmt="{:.4f}"): +# """ +# Transform to a comma separated string the content of results from the dictionary with all the multi-threshold metric + +# :param measures_mthresh: list of multi threshold metrics +# :param multi_thresholds_dict: dictionary with the results of the multi-threshold metrics +# :param fmt: format in which the outputs should be written (default 4 decimal points) +# :return: complete comma-separated string of results in the order of keys specifid by measures_mthresh +# """ +# result_str = "" +# # list_space = ['com_ref', 'com_pred', 'list_labels'] +# for key in measures_mthresh: +# if len(multi_thresholds_dict[key]) == 2: +# result = multi_thresholds_dict[key][0]() +# else: +# result = multi_thresholds_dict[key][0]( +# multi_thresholds_dict[key][2] +# ) +# result_str += ( +# ",".join(fmt.format(x) for x in result) +# if isinstance(result, tuple) +# else fmt.format(result) +# ) +# result_str += "," +# return result_str[:-1] # trim the last comma -def to_dict_meas_(measures, measures_dict, fmt="{:.4f}"): - """Given the selected metrics provides a dictionary - with relevant metrics""" - result_dict = {} - # list_space = ['com_ref', 'com_pred', 'list_labels'] - for key in measures: - if len(measures_dict[key]) == 2: - result = measures_dict[key][0]() - else: - result = measures_dict[key][0](measures_dict[key][2]) - result_dict[key] = fmt.format(result) - return result_dict # trim the last comma +# def to_dict_meas_(measures, measures_dict, fmt="{:.4f}"): +# """ +# Given the selected metrics provides a dictionary +# with relevant metrics + +# :param measures: list of measures +# :param measures_dict: dictionary of result for metrics +# :param fmt: format to use (default 4 decimal places) + +# :return: result_dict +# """ +# result_dict = {} +# # list_space = ['com_ref', 'com_pred', 'list_labels'] +# for key in measures: +# if len(measures_dict[key]) == 2: +# result = measures_dict[key][0]() +# else: 
+# result = measures_dict[key][0](measures_dict[key][2]) +# result_dict[key] = fmt.format(result) +# return result_dict # trim the last comma def combine_df(df1,df2): """ Perform the concatenation of two dataframes - is used in the overall process when combining dataframe from existing and missing/failed prediction - :param: df1 First dataframe to concatenate - :param: df2 Second dataframe to concatenate + :param df1: First dataframe to concatenate + :param df2: Second dataframe to concatenate :return: concatenated dataframe of df1 and df2 """ if df1 is None or df1.shape[0]==0: @@ -500,8 +536,9 @@ def merge_list_df(list_df, on=['label','case']): """ Performs the merging of different dataframes of results given the label and cases values - :param: list_df: list of dataframes to merge together - :param: on list of columns on which to perform the merging operation + :param list_df: list of dataframes to merge together + :param on: list of columns on which to perform the merging operation + :return: df_fin: final merged dataframe """ @@ -512,6 +549,8 @@ def merge_list_df(list_df, on=['label','case']): for f in on: if f not in k.columns: flag_on = False + print(f, ' not present') + break if flag_on: list_fin.append(k) if len(list_fin) == 0: @@ -519,21 +558,21 @@ def merge_list_df(list_df, on=['label','case']): elif len(list_fin) == 1: return list_fin[0] else: - print("list fin is ",list_fin) + #print("list fin is ",list_fin) df_fin = list_fin[0] + print(len(list_fin)) for k in list_fin[1:]: df_fin = pd.merge(df_fin, k, on=on) return df_fin - - - - - def trapezoidal_integration(x, fx): """Trapezoidal integration Reference https://en.wikipedia.org/w/index.php?title=Trapezoidal_rule&oldid=1104074899#Non-uniform_grid + + :param x: values on the x axis + :param fx: values on the y axis + :return: integration """ return np.sum((fx[:-1] + fx[1:])/2 * (x[1:] - x[:-1])) \ No newline at end of file diff --git a/README.rst b/README.rst index 0714635..47980ce 100644 --- 
a/README.rst +++ b/README.rst @@ -16,12 +16,12 @@ A Python implementaiton of `Metrics Reloaded