# data_pools.py
# (removed non-source residue from a Git web-UI scrape: file-size header,
#  "Newer Older" navigation, committer avatar lines and the line-number gutter)
import numpy as np


class BatchIterator(object):
    """
    Iterate a data pool in mini-batches.

    The iterator is bound to a pool via ``__call__``; the pool must expose
    ``shape`` (``shape[0]`` = number of samples), ``shuffle()`` and a
    slice-based ``__getitem__`` returning a list of arrays (one per input).
    """

    def __init__(self, batch_size, re_iterate=1, prepare=None, k_samples=None, shuffle=True):
        """Constructor.

        Parameters
        ----------
        batch_size : int
            Number of samples per mini-batch.
        re_iterate : int
            How many times the selected k samples are swept per iteration.
        prepare : callable or None
            Optional transform applied to each batch; called with the batch
            arrays as positional arguments. Defaults to the identity.
        k_samples : int or None
            Use only this many samples per epoch (None: use the whole pool).
        shuffle : bool
            Shuffle the pool when bound and again after a full sweep.
        """
        self.batch_size = batch_size

        # default transform: pass the batch arrays through unchanged
        if prepare is None:
            def prepare(*data):
                return data
        self.prepare = prepare

        self.re_iterate = re_iterate
        self.k_samples = k_samples
        self.shuffle = shuffle
        self.epoch_counter = 0
        self.n_epochs = None

    def __call__(self, data_pool):
        """Bind a data pool and return self (so the object can be iterated)."""
        self.data_pool = data_pool
        if self.k_samples is None or self.k_samples > self.data_pool.shape[0]:
            self.k_samples = self.data_pool.shape[0]
        self.n_batches = self.re_iterate * (self.k_samples // self.batch_size)
        self.n_epochs = self.data_pool.shape[0] // self.k_samples

        if self.shuffle:
            self.data_pool.shuffle()

        return self

    def __iter__(self):

        # compute current epoch index (selects which k-sample chunk to use)
        idx_epoch = self.epoch_counter % self.n_epochs

        # reiterate entire data-set
        for _ in range(self.re_iterate):

            # ceil-divide so a trailing partial batch is also produced
            # (fix: '/' is float division under Python 3 and breaks range();
            #  'xrange' does not exist under Python 3 at all)
            for i_b in range((self.k_samples + self.batch_size - 1) // self.batch_size):

                # slice batch data
                start = i_b * self.batch_size + idx_epoch * self.k_samples
                stop = (i_b + 1) * self.batch_size + idx_epoch * self.k_samples
                stop = min(stop, self.data_pool.shape[0])
                xb = self.data_pool[slice(start, stop)]

                # pad a short trailing batch with samples from the pool start
                n_samples = xb[0].shape[0]
                if n_samples < self.batch_size:
                    n_missing = self.batch_size - n_samples

                    x_con = self.data_pool[0:n_missing]
                    for i_input in range(len(xb)):
                        xb[i_input] = np.concatenate((xb[i_input], x_con[i_input]))

                yield self.transform(xb)

            # increase epoch counter
            self.epoch_counter += 1

        # shuffle train data after full set iteration
        if self.shuffle and (idx_epoch + 1) == self.n_epochs:
            self.data_pool.shuffle()

    def transform(self, data):
        """Apply the `prepare` transform to one batch (list of arrays)."""
        return self.prepare(*data)


class UniversalRegressionDataPool(object):
    """ Regression data pool for RNNs, ConvNets and Convolutional RNNs """

    def __init__(self, sequences, target_sequences, sub_sequence_length=1, data_context=1, step_size=1,
                 central_target=True, do_shuffle=True):
        """ Constructor

        Parameters
        ----------

        sequences : list
            List of data sequences (input to your network)

        target_sequences : list
            List of target sequences (target of your network prediction). Must be in line with sequences.

        sub_sequence_length : int
            Number of time steps for each training example

        data_context : int
            Temporal context for each time step. This is required for convolution and convolution RNNs.
            Has to be an odd number 1, 3, 5, ...

        step_size : int
            Step size for producing the sub sequences

        central_target : bool
            If true only the central target is returned for prediction. Set this to true for conv-Nets and conv-RNNs.

        do_shuffle : bool
            If true data gets shuffled on initialization
        """

        self.sequences = sequences
        self.target_sequences = target_sequences
        # NOTE: attribute name keeps the original spelling for backward compatibility
        self.sub_seqence_length = sub_sequence_length
        self.data_context = data_context
        self.step_size = step_size
        self.central_target = central_target

        # number of context frames on each side of a time step
        self.half_context = (self.data_context - 1) // 2

        self.do_shuffle = do_shuffle

        self.n_sequences = len(self.sequences)

        # (n_items, 2) int array of [sequence index, start step] pairs
        self.train_items = None
        # [n_items] — number of training examples in the pool
        self.shape = None

        self.prepare_train_items()

        if self.do_shuffle:
            self.shuffle()

    def shuffle(self):
        """Randomly permute the order of the training items."""
        rand_idx = np.random.permutation(self.shape[0])
        self.train_items = self.train_items[rand_idx]

    def prepare_train_items(self):
        """Enumerate all (sequence, start step) training items."""

        seq_lengths = [sequence.shape[0] for sequence in self.sequences]
        n_items = sum(int(np.ceil(max(0, seq_len - self.sub_seqence_length - self.data_context + 1)
                                  / float(self.step_size)))
                      for seq_len in seq_lengths)

        # fix: np.int was removed in NumPy 1.24 — use the builtin int dtype
        self.train_items = np.zeros((n_items, 2), dtype=int)
        out_idx = 0

        for i_seq in range(self.n_sequences):
            sequence = self.sequences[i_seq]
            target = self.target_sequences[i_seq]
            assert len(sequence) == len(target)

            # keep half_context frames of margin on both ends of the sequence
            start_idx = self.half_context
            stop_idx = sequence.shape[0] - self.sub_seqence_length - self.half_context

            for i_step in range(start_idx, stop_idx, self.step_size):
                self.train_items[out_idx, :] = [i_seq, i_step]
                out_idx += 1

        self.shape = [self.train_items.shape[0]]
        assert n_items == self.shape[0]

    def __getitem__(self, key):
        """Return [X, Y] float32 arrays for an int index or a slice of items."""

        # get batch
        if key.__class__ != slice:
            key = slice(key, key + 1)

        # fix out of bounds (and normalize an open slice start)
        start = 0 if key.start is None else key.start
        key = slice(start, min(self.shape[0], key.stop))

        # prepare list of files
        X = []
        Y = []
        for item_id in range(key.start, key.stop):
            seq_id, step_idx = self.train_items[item_id]

            # get sequences
            seq = self.sequences[seq_id]
            targ = self.target_sequences[seq_id]

            seq_stack = []
            targ_stack = []
            for i_sub_seq in range(self.sub_seqence_length):

                # define time steps
                t0 = step_idx + i_sub_seq - self.half_context
                t1 = t0 + self.data_context

                # get sequence window (data_context frames around the step)
                seq_stack.append(seq[t0:t1])

                if self.central_target:
                    # only the central frame's target is predicted
                    center_idx = t0 + self.half_context
                    targ_stack.append(targ[center_idx:center_idx + 1])
                else:
                    targ_stack.append(targ[t0:t1])

            seq_stack = np.asarray(seq_stack)
            targ_stack = np.asarray(targ_stack)

            X.append(seq_stack)
            Y.append(targ_stack)

        X = np.asarray(X, dtype=np.float32)
        Y = np.asarray(Y, dtype=np.float32)

        return [X, Y]


if __name__ == '__main__':
    """ main """

    # create dummy data: 10 random sequences with matching target sequences
    # (fix: Python 2 'print x, y' statement and 'xrange' made this module a
    #  syntax error / NameError under Python 3)
    sequences = []
    target_sequences = []
    for i in range(10):
        seq_len = np.random.randint(low=200, high=500)
        sequences.append(np.random.randn(seq_len, 23))
        target_sequences.append(np.random.randn(seq_len, 3))

    data_pool = UniversalRegressionDataPool(sequences, target_sequences, sub_sequence_length=15, data_context=3,
                                            step_size=1, central_target=True)

    # fetch one mini-batch and report its dimensions
    x, y = data_pool[0:10]
    print(x.shape, y.shape)

    # smoke test: iterate the entire pool one example at a time
    for i in range(data_pool.shape[0]):
        x, y = data_pool[i:i + 1]