"""Tracking Differentiator-based Multiview Dilated Characteristics classifier."""

import numpy as np
from sklearn.feature_selection import f_classif
from sklearn.linear_model import RidgeClassifierCV
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler

from aeon.classification.base import BaseClassifier
from aeon.transformations.collection.feature_based import (
    TSFreshRelevant,
    hard_voting,
    series_set_dilation,
    series_transform,
)

__all__ = ["TDMVDCClassifier"]


class TDMVDCClassifier(BaseClassifier):
    """Tracking Differentiator-based Multiview Dilated Characteristics classifier.

    Builds first- and second-order differential views of the input series with
    tracking differentiators, dilates every view at several dilation rates,
    extracts relevant TSFresh features from each dilated view, and combines
    ridge classifiers trained on differently sized top-scoring feature subsets
    by hard voting [1]_.

    Parameters
    ----------
    default_fc_parameters : str, default="efficient"
        Set of TSFresh features to be extracted, options are "minimal",
        "efficient" or "comprehensive".
    k1 : float, default=2
        Filter parameter of tracking differentiator 1, which generates the
        first-order differential series.
    k2 : float, default=2
        Filter parameter of tracking differentiator 2, which generates the
        second-order differential series.
    feature_store_ratios : list of float or None, default=None
        Feature saving ratios of the different feature selectors; ``None``
        uses ``[0.1, 0.2, 0.3, 0.4, 0.5]``.
    n_jobs : int, default=1
        The number of jobs to run in parallel for both ``fit`` and ``predict``.

    References
    ----------
    .. [1] Changchun He and Xin Huo. "Tracking Differentiator-based Multiview
       Dilated Characteristics for Time Series Classification." The 22nd IEEE
       International Conference on Industrial Informatics (INDIN 2024), 2024.
    """

    _tags = {
        "capability:multivariate": False,
        "capability:multithreading": True,
        "capability:train_estimate": False,
        "algorithm_type": "feature",
        "python_dependencies": "tsfresh",
    }

    def __init__(
        self,
        default_fc_parameters="efficient",
        k1=2,
        k2=2,
        feature_store_ratios=None,
        n_jobs=1,
    ):
        # Store parameters exactly as passed; the None default for
        # feature_store_ratios is resolved in _fit so that get_params /
        # clone round-trips the constructor arguments unchanged.
        self.default_fc_parameters = default_fc_parameters
        self.k1 = k1
        self.k2 = k2
        self.feature_store_ratios = feature_store_ratios
        self.n_jobs = n_jobs

        super().__init__()

    def _fit(self, X, y):
        """Fit the multiview dilated-characteristics pipeline.

        Parameters
        ----------
        X : 3D np.ndarray of shape = [n_cases, n_channels, n_timepoints]
            The training data.
        y : array-like, shape = [n_cases]
            The class labels.

        Returns
        -------
        self :
            Reference to self.
        """
        # Resolve the mutable default without touching the stored parameter.
        self._ratios = (
            [0.1, 0.2, 0.3, 0.4, 0.5]
            if self.feature_store_ratios is None
            else list(self.feature_store_ratios)
        )

        # Dilation rates 2**0 .. 2**d_max, capped at 2**5. Clamp d_max at 0
        # so that short series (where log2(n-1)-3 < 0) still get the
        # undilated view instead of an empty dilation list.
        n_timepoints = X.shape[2]
        d_max = min(5, max(0, int(np.log2(n_timepoints - 1) - 3)))
        self.dList = 2 ** np.arange(0, d_max + 1)

        # Differential views produced by the tracking differentiators.
        FX = series_transform(X, mode=1, k1=self.k1)  # first-order
        SX = series_transform(X, mode=2, k1=self.k1, k2=self.k2)  # second-order

        # One fitted TSFresh extractor per (view, dilation rate).
        self.tsFreshListR = []  # original-series view
        self.tsFreshListF = []  # first-order differential view
        self.tsFreshListS = []  # second-order differential view
        featR, featF, featS = [], [], []

        for d_rate in self.dList:
            for signal, extractors, feats in (
                (X, self.tsFreshListR, featR),
                (FX, self.tsFreshListF, featF),
                (SX, self.tsFreshListS, featS),
            ):
                dilated = series_set_dilation(signal, d_rate)
                # Honour the user-supplied feature set rather than a
                # hardcoded "efficient".
                extractor = TSFreshRelevant(
                    default_fc_parameters=self.default_fc_parameters,
                    n_jobs=self.n_jobs,
                )
                extractor.fit(dilated, y)
                extractors.append(extractor)
                feats.append(np.array(extractor.transform(dilated)))

        # Concatenate the dilated features of the three views.
        allX = np.hstack((np.hstack(featR), np.hstack(featF), np.hstack(featS)))

        # ANOVA F-scores rank the concatenated multiview features; NaN
        # scores (constant features) are treated as uninformative.
        self.scoreRFS = f_classif(allX, y)[0]
        self.scoreRFS[np.isnan(self.scoreRFS)] = 0

        # One ridge classifier per feature-saving ratio (one per "view").
        order = np.argsort(self.scoreRFS)[::-1]
        self.clfList = []
        for ratio_ in self._ratios:
            clf = Pipeline(
                [
                    ("scaler", StandardScaler()),
                    ("ridge", RidgeClassifierCV(alphas=np.logspace(-3, 3, 10))),
                ]
            )
            best = order[: int(len(self.scoreRFS) * ratio_)]
            clf.fit(allX[:, best], y)
            self.clfList.append(clf)
        return self

    def _predict(self, X):
        """Predict class labels for the cases in X.

        Parameters
        ----------
        X : 3D np.ndarray of shape = [n_cases, n_channels, n_timepoints]
            The data to make predictions for.

        Returns
        -------
        y : array-like, shape = [n_cases]
            Predicted class labels.
        """
        FX = series_transform(X, mode=1, k1=self.k1)
        SX = series_transform(X, mode=2, k1=self.k1, k2=self.k2)

        featR, featF, featS = [], [], []
        for i, d_rate in enumerate(self.dList):
            featR.append(
                np.array(self.tsFreshListR[i].transform(series_set_dilation(X, d_rate)))
            )
            featF.append(
                np.array(
                    self.tsFreshListF[i].transform(series_set_dilation(FX, d_rate))
                )
            )
            featS.append(
                np.array(
                    self.tsFreshListS[i].transform(series_set_dilation(SX, d_rate))
                )
            )

        allX = np.hstack((np.hstack(featR), np.hstack(featF), np.hstack(featS)))

        # Predict with every per-ratio classifier, then hard-vote.
        order = np.argsort(self.scoreRFS)[::-1]
        preds = []
        for ratio_, clf in zip(self._ratios, self.clfList):
            best = order[: int(len(self.scoreRFS) * ratio_)]
            preds.append(clf.predict(allX[:, best]))
        return hard_voting(np.vstack(preds))

    def _fit_predict(self, X, y) -> np.ndarray:
        """Fit the classifier then predict labels for the training data X."""
        return self._fit(X, y)._predict(X)

    @classmethod
    def _get_test_params(cls, parameter_set="default"):
        """Return testing parameter settings for the estimator.

        Parameters
        ----------
        parameter_set : str, default="default"
            Name of the parameter set to return; only "default" is defined.

        Returns
        -------
        dict
            Parameters to create a fast-to-fit test instance.
        """
        return {
            "default_fc_parameters": "minimal",
            "feature_store_ratios": [0.5, 1.0],
        }
"""TSFresh feature-extraction transformers used by the TDMVDC classifier."""

import numpy as np
import pandas as pd
from sklearn.preprocessing import scale

from aeon.transformations.collection.base import BaseCollectionTransformer


class TSFreshExtractor(BaseCollectionTransformer):
    """Extract TSFresh features from a collection of time series.

    Parameters
    ----------
    default_fc_parameters : str or dict, default="efficient"
        TSFresh feature set: "minimal", "efficient" or "comprehensive", or a
        tsfresh parameter dictionary passed through to tsfresh unchanged.
    n_jobs : int, default=1
        Number of parallel jobs used by tsfresh.
    """

    def __init__(self, default_fc_parameters="efficient", n_jobs=1):
        self.default_fc_parameters = default_fc_parameters
        self.n_jobs = n_jobs
        super().__init__()

    def _fit(self, X, y=None):
        """No-op fit; the extractor is stateless."""
        return self

    def _transform(self, X, y=None):
        """Extract tsfresh features, returning a 2D feature array."""
        df = self._convert_to_long(X)

        # tsfresh is an optional dependency, so import lazily.
        from tsfresh import extract_features
        from tsfresh.feature_extraction.settings import (
            ComprehensiveFCParameters,
            EfficientFCParameters,
            MinimalFCParameters,
        )

        param_map = {
            "minimal": MinimalFCParameters(),
            "efficient": EfficientFCParameters(),
            "comprehensive": ComprehensiveFCParameters(),
        }
        # Unknown keys fall through unchanged so callers may pass a tsfresh
        # parameter dictionary directly.
        fc_params = param_map.get(
            self.default_fc_parameters, self.default_fc_parameters
        )

        features = extract_features(
            df,
            column_id="index",
            column_value="value",
            column_kind="column",
            column_sort="time_index",
            default_fc_parameters=fc_params,
            n_jobs=self.n_jobs,
        )
        return features.values

    def _fit_transform(self, X, y=None):
        """Fit (a no-op) then transform."""
        self._fit(X, y)
        return self._transform(X, y)

    def _convert_to_long(self, X):
        """Convert a 3D collection to the tsfresh long dataframe format."""
        n_cases, n_channels, _ = X.shape
        df = pd.DataFrame(X.reshape(n_cases * n_channels, -1))
        df["index"] = np.repeat(np.arange(n_cases), n_channels)
        df["column"] = np.tile(np.arange(n_channels), n_cases)

        df = df.melt(
            id_vars=["index", "column"], var_name="time_index", value_name="value"
        )
        return df[["index", "time_index", "column", "value"]]


class TSFreshRelevantExtractor(BaseCollectionTransformer):
    """Extract TSFresh features, then keep only statistically relevant ones.

    Parameters
    ----------
    default_fc_parameters : str or dict, default="efficient"
        TSFresh feature set (see ``TSFreshExtractor``).
    n_jobs : int, default=1
        Number of parallel jobs used by tsfresh.
    fdr_level : float, default=0.05
        False discovery rate used by the tsfresh feature selector.
    hypotheses_independent : bool, default=False
        Whether the selector treats feature significances as independent.
    ml_task : str, default="auto"
        Machine-learning task forwarded to the tsfresh selector.
    """

    def __init__(
        self,
        default_fc_parameters="efficient",
        n_jobs=1,
        fdr_level=0.05,
        hypotheses_independent=False,
        ml_task="auto",
    ):
        self.default_fc_parameters = default_fc_parameters
        self.n_jobs = n_jobs
        self.fdr_level = fdr_level
        self.hypotheses_independent = hypotheses_independent
        self.ml_task = ml_task
        super().__init__()

    def _fit(self, X, y=None):
        """Fit the feature extractor and the supervised feature selector."""
        if y is None:
            # FeatureSelector is supervised; fail early with a clear message
            # instead of crashing inside tsfresh.
            raise ValueError("TSFreshRelevantExtractor requires y to fit")
        from tsfresh.transformers.feature_selector import FeatureSelector

        # Feature extraction.
        self.extractor_ = TSFreshExtractor(
            default_fc_parameters=self.default_fc_parameters, n_jobs=self.n_jobs
        )
        Xt = self.extractor_.fit_transform(X)

        # Supervised feature selection.
        self.selector_ = FeatureSelector(
            fdr_level=self.fdr_level,
            hypotheses_independent=self.hypotheses_independent,
            ml_task=self.ml_task,
        )
        self.selector_.fit(Xt, y)
        self.names_ = self.selector_.relevant_features
        return self

    def _transform(self, X, y=None):
        """Extract features and keep the columns selected during fit."""
        Xt = self.extractor_.transform(X)
        return self.selector_.transform(Xt)

    def _fit_transform(self, X, y=None):
        """Fit and then transform the same collection."""
        self._fit(X, y)
        return self._transform(X, y)
def series_set_dilation(seriesX, d_rate=1):
    """Apply a dilation mapping to every series in a collection.

    Each channel of each case is rearranged so that samples with the same
    phase modulo ``d_rate`` become contiguous, following the dilation mapping
    of WEASEL 2.0 [1]_.

    Parameters
    ----------
    seriesX : 3D np.ndarray of shape (n_cases, n_channels, n_timepoints)
        The collection of series to dilate.
    d_rate : int, default=1
        Dilation rate; ``1`` leaves the series unchanged.

    Returns
    -------
    3D np.ndarray of the same shape and dtype as ``seriesX``
        The dilated series collection.

    References
    ----------
    .. [1] P. Schaefer and U. Leser, "WEASEL 2.0: a random dilated dictionary
       transform for fast, accurate and memory constrained time series
       classification", Machine Learning, vol. 112, pp. 4763-4788 (2023).
    """
    # The dilation is the same index permutation for every case and channel,
    # so build it once and apply it in a single fancy-indexing pass instead
    # of a per-series Python loop.
    n_timepoints = seriesX.shape[2]
    order = np.hstack([np.arange(d, n_timepoints, d_rate) for d in range(d_rate)])
    return seriesX[:, :, order]


def fhan(x1, x2, r, h0):
    """Discrete optimal-control function of the tracking differentiator.

    Parameters
    ----------
    x1 : float
        Tracking error (tracked state minus the current input sample).
    x2 : float
        Current differential state.
    r : float
        Velocity factor used to control tracking speed.
    h0 : float
        Filtering step size.

    Returns
    -------
    u : float
        Control input for the observer update.
    y : float
        Raw differential signal.

    References
    ----------
    .. [1] J. Han, "From PID to active disturbance rejection control", IEEE
       Trans. Ind. Electron., vol. 56, no. 3, pp. 900-906 (2009).
    """
    d = r * h0
    d0 = d * h0
    y = x1 + h0 * x2  # raw differential signal
    a0 = np.sqrt(d * d + 8 * r * np.abs(y))
    if np.abs(y) > d0:
        a = x2 + (a0 - d) / 2.0 * np.sign(y)
    else:
        a = x2 + y / h0
    # Saturated control law.
    u = -r * a / d if np.abs(a) <= d else -r * np.sign(a)
    return u, y


def td(signal, r=100, k=3, h=1):
    """Tracking differentiator with an adjustable filter factor.

    Produces a filtered first-order differential of ``signal``.

    Parameters
    ----------
    signal : 1D np.ndarray of shape (n_timepoints,)
        Original time series; must contain at least two samples.
    r : float, default=100
        Velocity factor used to control tracking speed.
    k : float, default=3
        Filter factor.
    h : float, default=1
        Step size.

    Returns
    -------
    1D np.ndarray of shape (n_timepoints - 1,)
        The differential signal (the initial transient sample is dropped).

    Raises
    ------
    ValueError
        If ``signal`` has fewer than two samples.

    References
    ----------
    .. [1] J. Han, "From PID to active disturbance rejection control", IEEE
       Trans. Ind. Electron., vol. 56, no. 3, pp. 900-906 (2009).
    """
    if len(signal) < 2:
        # The state initialisation below needs two samples; fail clearly.
        raise ValueError("td requires a signal with at least two samples")
    x1 = signal[0]  # state 1: tracked signal
    x2 = -(signal[1] - signal[0]) / h  # state 2: differential estimate
    h0 = k * h
    dSignal = np.zeros(len(signal))
    for i, v in enumerate(signal):
        # fhan consumes the states from before this step's updates.
        u, y = fhan(x1 - v, x2, r, h0)
        x1 = x1 + h * x2  # update state 1 with the old state 2
        x2 = x2 + h * u  # update state 2
        dSignal[i] = y
    # Scale transform; drop the first (initialisation-transient) sample.
    return -dSignal[1:] / h0
def series_transform(seriesX, mode=1, k1=2, k2=2):
    """Differentiate every series in a collection with tracking differentiators.

    Parameters
    ----------
    seriesX : 3D np.ndarray of shape (n_cases, n_channels, n_timepoints)
        The collection of series to transform.
    mode : int, default=1
        ``1`` computes the first-order differential series (length
        ``n_timepoints - 1``); ``2`` computes the second-order differential
        series (length ``n_timepoints - 2``).
    k1 : float, default=2
        Filter factor of tracking differentiator 1.
    k2 : float, default=2
        Filter factor of tracking differentiator 2; only used when ``mode=2``.

    Returns
    -------
    3D np.ndarray
        The differentiated, per-series standardised collection.

    Raises
    ------
    ValueError
        If ``mode`` is neither 1 nor 2.

    References
    ----------
    .. [1] J. Han, "From PID to active disturbance rejection control", IEEE
       Trans. Ind. Electron., vol. 56, no. 3, pp. 900-906 (2009).
    """
    if mode not in (1, 2):
        # Previously an unsupported mode silently returned None; fail loudly.
        raise ValueError(f"mode must be 1 or 2, got {mode!r}")
    n_cases, n_channels, n_timepoints = seriesX.shape
    # Each application of td shortens the series by one sample.
    out = np.zeros((n_cases, n_channels, n_timepoints - mode))
    for i in range(n_cases):
        for j in range(n_channels):
            diff_ = td(seriesX[i, j, :], k=k1)
            if mode == 2:
                diff_ = td(diff_, k=k2)  # differentiate a second time
            out[i, j, :] = scale(diff_)  # zero-mean, unit-variance per series
    return out


def hard_voting(testYList):
    """Combine the label predictions of several classifiers by majority vote.

    Parameters
    ----------
    testYList : 2D np.ndarray of shape (n_classifiers, n_cases)
        Predicted labels of each classifier for each case.

    Returns
    -------
    1D np.ndarray of shape (n_cases,)
        Majority-vote label per case; ties are broken in favour of the
        smallest label, matching the original loop-based implementation.
    """
    uniqueY = np.unique(testYList)  # sorted distinct labels
    # votes[c, i] = number of classifiers predicting uniqueY[c] for case i;
    # one vectorised comparison replaces the former triple Python loop.
    votes = (testYList[None, :, :] == uniqueY[:, None, None]).sum(axis=1)
    # np.argmax returns the first maximum, i.e. the smallest label on ties.
    return uniqueY[np.argmax(votes, axis=0)]
input data using the TSFresh [1]_\n", + "transformer and builds a provided estimator using the transformed data.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Loading time series set\n", + "from aeon.datasets import load_arrow_head\n", + "\n", + "trainSignalX, trainY = load_arrow_head(\"TRAIN\")\n", + "testSignalX, testY = load_arrow_head(\"TEST\")\n", + "trainY, testY = trainY.astype(int), testY.astype(int)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1/5 2/5 3/5 4/5 5/5 \t\n", + "1/5 2/5 3/5 4/5 5/5 \t\n" + ] + } + ], + "source": [ + "from aeon.classification.feature_based import TDMVDCClassifier\n", + "\n", + "td_mvdc = TDMVDCClassifier(n_jobs=5)\n", + "td_mvdc.fit(trainSignalX, trainY)\n", + "testPY = td_mvdc.predict(testSignalX)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accuracy : 0.7771428571428571\n" + ] + } + ], + "source": [ + "import numpy as np\n", + "\n", + "accV = np.sum(testPY == testY) / len(testY)\n", + "print(\"Accuracy :\", accV)" + ] + }, { "attachments": {}, "cell_type": "markdown", @@ -395,7 +463,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "aeon-venv", "language": "python", "name": "python3" }, @@ -409,7 +477,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.8" + "version": "3.12.9" } }, "nbformat": 4,