-
Notifications
You must be signed in to change notification settings - Fork 0
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
2367c71
commit 07eedc0
Showing
23 changed files
with
3,974 additions
and
82 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file was deleted.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,5 +1,5 @@ | ||
{ | ||
"data_dir" : "../data/FindingELO", | ||
"cache_dir" : "../data/FindingELO/cache", | ||
"data_dir" : "../../data/FindingELO", | ||
"cache_dir" : "../../data/FindingELO/cache", | ||
"num_partitions" : 3 | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,59 @@ | ||
Training | ||
__KeepChannel_[28, 29, 30]__ButterworthFilter_0.1_15_4_200_epoched__EEGConcatExtracter__Downsampler_dec_8 | ||
Fitting 4 folds for each of 1 candidates, totalling 4 fits | ||
[Parallel(n_jobs=-1)]: Done 1 jobs | elapsed: 10.7s | ||
[Parallel(n_jobs=-1)]: Done 4 out of 4 | elapsed: 11.0s finished | ||
Cross Validation Stats | ||
Best parameters set: | ||
GradientBoostingClassifier(init=None, learning_rate=0.05, loss='deviance', | ||
max_depth=2, max_features=0.25, max_leaf_nodes=None, | ||
min_samples_leaf=1, min_samples_split=2, n_estimators=500, | ||
random_state=None, subsample=1.0, verbose=0, | ||
warm_start=False) | ||
Grid scores on development set: | ||
0.661 (+/-0.025) for {'max_features': 0.25, 'n_estimators': 500, 'learning_rate': 0.05, 'max_depth': 2} | ||
Training | ||
____KeepChannel_[28, 29, 30]__ButterworthFilter_0.1_15_4_200_epoched__EEGConcatExtracter__Downsampler_dec_12__KeepChannel_[28, 29, 30]__ButterworthFilter_0.1_15_4_200_epoched__<transforms.ActualWaveletTransform instance at 0x7f6c07a37fc8>__EEGConcatExtracter__Downsampler_dec_12 | ||
(5440, 260, 3) | ||
Fitting 4 folds for each of 1 candidates, totalling 4 fits | ||
[Parallel(n_jobs=-1)]: Done 1 jobs | elapsed: 10.5s | ||
[Parallel(n_jobs=-1)]: Done 4 out of 4 | elapsed: 10.9s finished | ||
Cross Validation Stats | ||
Best parameters set: | ||
GradientBoostingClassifier(init=None, learning_rate=0.05, loss='deviance', | ||
max_depth=2, max_features=0.25, max_leaf_nodes=None, | ||
min_samples_leaf=1, min_samples_split=2, n_estimators=500, | ||
random_state=None, subsample=1.0, verbose=0, | ||
warm_start=False) | ||
Grid scores on development set: | ||
0.661 (+/-0.022) for {'max_features': 0.25, 'n_estimators': 500, 'learning_rate': 0.05, 'max_depth': 2} | ||
Training | ||
____KeepChannel_[28, 29, 30]__ButterworthFilter_0.1_15_4_200_epoched__EEGConcatExtracter__Downsampler_dec_24__KeepChannel_[28, 29, 30]__ButterworthFilter_0.1_15_4_200_epoched__<transforms.ActualWaveletTransform instance at 0x7f6c07a3e4d0>__EEGConcatExtracter__Downsampler_dec_24 | ||
(5440, 260, 3) | ||
Fitting 4 folds for each of 1 candidates, totalling 4 fits | ||
[Parallel(n_jobs=-1)]: Done 1 jobs | elapsed: 5.6s | ||
[Parallel(n_jobs=-1)]: Done 4 out of 4 | elapsed: 5.9s finished | ||
Cross Validation Stats | ||
Best parameters set: | ||
GradientBoostingClassifier(init=None, learning_rate=0.05, loss='deviance', | ||
max_depth=2, max_features=0.25, max_leaf_nodes=None, | ||
min_samples_leaf=1, min_samples_split=2, n_estimators=500, | ||
random_state=None, subsample=1.0, verbose=0, | ||
warm_start=False) | ||
Grid scores on development set: | ||
0.658 (+/-0.025) for {'max_features': 0.25, 'n_estimators': 500, 'learning_rate': 0.05, 'max_depth': 2} | ||
Training | ||
____KeepChannel_[28, 29, 30]__ButterworthFilter_0.1_15_4_200_epoched__EEGConcatExtracter__Downsampler_dec_12__KeepChannel_[28, 29, 30]__ButterworthFilter_0.1_15_4_200_epoched__<transforms.ActualWaveletTransform instance at 0x7f6c07a3e998>__EEGConcatExtracter__Downsampler_dec_24 | ||
(5440, 260, 3) | ||
Fitting 4 folds for each of 1 candidates, totalling 4 fits | ||
[Parallel(n_jobs=-1)]: Done 1 out of 4 | elapsed: 9.0s remaining: 26.9s | ||
[Parallel(n_jobs=-1)]: Done 4 out of 4 | elapsed: 9.2s finished | ||
Cross Validation Stats | ||
Best parameters set: | ||
GradientBoostingClassifier(init=None, learning_rate=0.05, loss='deviance', | ||
max_depth=2, max_features=0.25, max_leaf_nodes=None, | ||
min_samples_leaf=1, min_samples_split=2, n_estimators=500, | ||
random_state=None, subsample=1.0, verbose=0, | ||
warm_start=False) | ||
Grid scores on development set: | ||
0.657 (+/-0.025) for {'max_features': 0.25, 'n_estimators': 500, 'learning_rate': 0.05, 'max_depth': 2} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,69 @@ | ||
from pandas import read_csv | ||
import os | ||
from extras import * | ||
import numpy as np | ||
from model_gen import get_preproc | ||
|
||
def read_metadata(params):
    """
    Load channel metadata (electrode locations and associated extras)
    from 'ChannelsLocation.csv' inside the configured data directory.

    :param params: settings object ('settings.json') exposing .data_dir
    :return: pandas DataFrame with one row per channel
    """
    metadata_path = os.path.join(params.data_dir, "ChannelsLocation.csv")
    return read_csv(metadata_path)
|
||
def read_maindata(params,type):
    """
    Read the training/testing set files and build the X (and, for training,
    y) matrices for further processing.

    The list of files for the subjects named in 'settings.json' is fetched
    first; each file is then split into one dataframe slice per feedback
    event via get_x(), and for the training set each slice is paired with
    its label from 'TrainLabels.csv'.

    :param params: 'settings.json' parameters
    :param type: 'train' or 'test'
    :return: (X, y) when type == 'train'; X alone when type == 'test'
    """
    assert (type == "train" or type == "test"),"type field must be either train or test"
    flist, subjsess_list = get_filelist(params, type)
    X = []
    if type == "test":
        for fname in flist:
            X.extend(get_x(read_csv(fname), 0, 260))
        return (np.array(X))
    # Training path: collect labels alongside the extracted windows.
    train_labels = read_csv(os.path.join(params.data_dir, "TrainLabels.csv"))
    y = []
    for findex, fname in enumerate(flist):
        X.extend(get_x(read_csv(fname), 0, 260))
        # NOTE(review): substring matching on IdFeedBack assumes no
        # subject/session id is a prefix of another — confirm upstream.
        label_mask = train_labels["IdFeedBack"].str.contains(subjsess_list[findex])
        y.extend(train_labels[label_mask]["Prediction"])
    assert (len(X) == len(y)),"Training and Prediction Set values dont match"
    return (np.array(X), y)
|
||
def get_x(my_df,start_offset,end_offset):
    """
    Extract one preprocessed data window per feedback event.

    Every loaded csv file has EEG + EOG + FeedbackEvent info. This function
    finds the rows where FeedBackEvent == 1, drops the non-EEG columns
    (FeedBackEvent, Time, EOG), applies the configured preprocessing, and
    slices out one window of samples per event.

    :param my_df: csv read as dataframe
    :param start_offset: window start relative to each feedback-event row
    :param end_offset: window end (exclusive) relative to each feedback-event row
    :return: list of 2-D arrays, one per feedback event, covering rows
             [event + start_offset, event + end_offset)
    """
    fb_list = []
    fb_indices = my_df[my_df["FeedBackEvent"] == 1].index.tolist()
    # Fix: DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is
    # the documented replacement and yields the same ndarray.
    my_df = my_df.drop('FeedBackEvent', axis = 1).drop('Time',axis=1).drop('EOG',axis=1).to_numpy()
    my_df = get_preproc().transform(my_df)
    for fb_ind in fb_indices:
        fb_list.append(my_df[fb_ind + start_offset:fb_ind + end_offset,:])
    return fb_list
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
Oops, something went wrong.