async_dataloader.py
from queue import Queue
from threading import Thread
import re

import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
# NOTE: torch._six only exists in older PyTorch releases; newer versions provide
# the same abstractions via collections.abc and plain str/bytes/int.
from torch._six import container_abcs, string_classes, int_classes


class AsynchronousLoader(object):
"""
Class for asynchronously loading from CPU memory to device memory with DataLoader.
Note that this only works for single GPU training, multiGPU uses PyTorch's DataParallel or
DistributedDataParallel which uses its own code for transferring data across GPUs. This could just
break or make things slower with DataParallel or DistributedDataParallel.
Args:
data: The PyTorch Dataset or DataLoader we're using to load.
device: The PyTorch device we are loading to
q_size: Size of the queue used to store the data loaded to the device
num_batches: Number of batches to load. This must be set if the dataloader
doesn't have a finite __len__. It will also override DataLoader.__len__
if set and DataLoader has a __len__. Otherwise it can be left as None
**kwargs: Any additional arguments to pass to the dataloader if we're
constructing one here
"""
    def __init__(self, data, device=torch.device('cuda', 0), q_size=10, num_batches=None, **kwargs):
        if isinstance(data, torch.utils.data.DataLoader):
            self.dataloader = data
        else:
            self.dataloader = DataLoader(data, **kwargs)

        if num_batches is not None:
            self.num_batches = num_batches
        elif hasattr(self.dataloader, '__len__'):
            self.num_batches = len(self.dataloader)
        else:
            raise Exception("num_batches must be specified or data must have finite __len__")

        self.device = device
        self.q_size = q_size

        self.load_stream = torch.cuda.Stream(device=device)
        self.queue = Queue(maxsize=self.q_size)
        self.idx = 0
    def load_loop(self):  # The loop that will load into the queue in the background
        for i, sample in enumerate(self.dataloader):
            self.queue.put(self.load_instance(sample))
            # Stop once num_batches batches have been queued (enumerate is zero-based)
            if i == len(self) - 1:
                break
    # Recursive loading for each instance based on torch.utils.data.default_collate
    def load_instance(self, sample):
        np_str_obj_array_pattern = re.compile(r'[SaUO]')
        elem_type = type(sample)

        if torch.is_tensor(sample):
            with torch.cuda.stream(self.load_stream):
                # Can only do asynchronous transfer if we use pin_memory
                if not sample.is_pinned():
                    sample = sample.pin_memory()
                return sample.to(self.device, non_blocking=True)
        elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
                and elem_type.__name__ != 'string_':
            # String and object arrays can't be converted to tensors, so leave them as-is
            if elem_type.__name__ == 'ndarray' \
                    and np_str_obj_array_pattern.search(sample.dtype.str) is not None:
                return sample
            return self.load_instance(torch.as_tensor(sample))
        elif isinstance(sample, container_abcs.Mapping):
            return {key: self.load_instance(sample[key]) for key in sample}
        elif isinstance(sample, tuple) and hasattr(sample, '_fields'):  # namedtuple
            return elem_type(*(self.load_instance(d) for d in sample))
        elif isinstance(sample, container_abcs.Sequence) and not isinstance(sample, string_classes):
            return [self.load_instance(s) for s in sample]
        else:
            return sample
    def __iter__(self):
        # We don't want to run the thread more than once.
        # Start a new thread if we are at the beginning of a new epoch and our current worker is dead.
        if (not hasattr(self, 'worker') or not self.worker.is_alive()) and self.queue.empty() and self.idx == 0:
            self.worker = Thread(target=self.load_loop)
            self.worker.daemon = True
            self.worker.start()
        return self
    def __next__(self):
        # If we've reached the number of batches to return,
        # or the queue is empty and the worker is dead, then exit
        done = not self.worker.is_alive() and self.queue.empty()
        done = done or self.idx >= len(self)
        if done:
            self.idx = 0
            self.queue.join()
            self.worker.join()
            raise StopIteration
        # Otherwise return the next batch
        out = self.queue.get()
        self.queue.task_done()
        self.idx += 1
        return out
    def __len__(self):
        return self.num_batches
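

# A minimal usage sketch, not part of the original module. The TensorDataset,
# batch size, and tensor shapes below are placeholders chosen for illustration;
# any map-style Dataset works. It assumes a CUDA device is available and an
# older PyTorch release where torch._six still exists.
if __name__ == '__main__':
    from torch.utils.data import TensorDataset

    if torch.cuda.is_available():
        dataset = TensorDataset(torch.randn(256, 3, 32, 32), torch.randint(0, 10, (256,)))
        # Extra kwargs (batch_size, shuffle) are forwarded to the internal DataLoader
        loader = AsynchronousLoader(dataset, device=torch.device('cuda', 0),
                                    q_size=10, batch_size=32, shuffle=True)
        for x, y in loader:
            # x and y arrive already on the GPU, copied on the side CUDA stream
            pass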