
Commit d701a93
remove comments which were moved to github
mibaumgartner committed Dec 3, 2019
1 parent 339e639 commit d701a93
Showing 3 changed files with 2 additions and 14 deletions.
2 changes: 0 additions & 2 deletions rising/loading/container.py
@@ -39,8 +39,6 @@ def split_by_index(self, split: SplitType) -> None:
for key, idx in split.items():
self._dset[key] = self._dataset.get_subset(idx)

- # TODO: Shouldn"t the kfold methods instead yield the current datasets
- # instead of the whole cointainer?
def kfold_by_index(self, splits: typing.Iterable[SplitType]):
"""
Produces kfold splits based on the given indices.
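The TODO removed above asks whether the kfold methods should yield the per-fold datasets directly instead of the whole container. A minimal sketch of that alternative, assuming only the get_subset method and the key-to-indices split mapping visible in the diff; the standalone function name and the yielded dict are illustrative, not part of rising's API:

from typing import Iterable, Mapping, Sequence

def kfold_datasets(dataset, splits: Iterable[Mapping[str, Sequence[int]]]):
    # Hypothetical variant of kfold_by_index: yield one dict of subset
    # datasets per fold (e.g. {"train": ..., "val": ...}) rather than
    # mutating and returning the container itself.
    for split in splits:
        yield {key: dataset.get_subset(idx) for key, idx in split.items()}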
10 changes: 2 additions & 8 deletions rising/loading/dataset.py
@@ -9,6 +9,7 @@

from torch.utils.data import Dataset as TorchDset
from rising.loading.debug_mode import get_debug_mode
+ from rising import AbstractMixin
from torch.multiprocessing import Pool


@@ -328,10 +329,7 @@ def __len__(self) -> int:
return len(self.data)


- # TODO: Maybe we should add the dataset baseclass as baseclass of this as well
- # (since it should just extend it and still have all the other dataset
- # functionalities)?
- class IDManager:
+ class IDManager(AbstractMixin):
def __init__(self, id_key: str, cache_ids: bool = True, **kwargs):
"""
Helper class to add additional functionality to Datasets
@@ -442,8 +440,6 @@ def __init__(self, data_path, load_fn, id_key, cache_ids=True,
**kwargs :
additional keyword arguments
"""
- # TODO: Shouldn't we call the baseclasses explicitly here? with super
- # it is not clear, which baseclass is actually called
super().__init__(data_path=data_path, load_fn=load_fn, id_key=id_key,
cache_ids=cache_ids, **kwargs)

@@ -467,7 +463,5 @@ def __init__(self, data_path, load_fn, id_key, cache_ids=True,
**kwargs :
additional keyword arguments
"""
- # TODO: Shouldn't we call the baseclasses explicitly here? with super
- # it is not clear, which baseclass is actually called
super().__init__(data_path=data_path, load_fn=load_fn, id_key=id_key,
cache_ids=cache_ids, **kwargs)
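The TODOs removed in this file question whether super().__init__ makes it clear which base class actually runs. In Python, super() follows the class's method resolution order (MRO), so with cooperative __init__ methods that forward **kwargs, every base class is called exactly once in MRO order. A small self-contained illustration; the class names here are made up for the example and are not rising's:

class LoaderBase:
    def __init__(self, data_path, load_fn, **kwargs):
        self.data_path = data_path
        self.load_fn = load_fn
        super().__init__(**kwargs)  # continue along the MRO

class IDMixin:
    def __init__(self, id_key, cache_ids=True, **kwargs):
        self.id_key = id_key
        self.cache_ids = cache_ids
        super().__init__(**kwargs)

class CachedIDDataset(IDMixin, LoaderBase):
    def __init__(self, data_path, load_fn, id_key, cache_ids=True, **kwargs):
        # super() here resolves to IDMixin first, then LoaderBase, then object.
        super().__init__(data_path=data_path, load_fn=load_fn,
                         id_key=id_key, cache_ids=cache_ids, **kwargs)

print([cls.__name__ for cls in CachedIDDataset.__mro__])
# ['CachedIDDataset', 'IDMixin', 'LoaderBase', 'object']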
4 changes: 0 additions & 4 deletions rising/loading/splitter.py
@@ -155,10 +155,6 @@ def index_split_grouped(self, groups_key: str = "id", **kwargs) -> SplitType:
--------
Shuffling cannot be deactivated
"""
- # TODO: maybe we should implement a single split function, which
- # handles random, stratificated and grouped splitting? This would not
- # be hard at all (based on the first look at sklearn internals) and
- # would remove some code duolication in here
split_dict = {}
groups = [d[groups_key] for d in self._dataset]

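The removed TODO above proposes one split helper that covers random, stratified, and grouped k-fold splitting by delegating to sklearn. A rough sketch of that idea, assuming scikit-learn is installed; the function name, its arguments, and the "train"/"val" keys are illustrative rather than part of rising:

from sklearn.model_selection import GroupKFold, KFold, StratifiedKFold

def index_kfold(n_samples, n_splits, labels=None, groups=None, **kwargs):
    # Pick the sklearn splitter from the provided arguments and yield
    # plain index splits, one dict per fold. Extra kwargs (e.g. shuffle,
    # random_state) are only forwarded to KFold/StratifiedKFold, since
    # GroupKFold does not accept them.
    indices = list(range(n_samples))
    if groups is not None:
        folds = GroupKFold(n_splits=n_splits).split(indices, groups=groups)
    elif labels is not None:
        folds = StratifiedKFold(n_splits=n_splits, **kwargs).split(indices, labels)
    else:
        folds = KFold(n_splits=n_splits, **kwargs).split(indices)
    for train_idx, val_idx in folds:
        yield {"train": train_idx.tolist(), "val": val_idx.tolist()}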
