# dataloader.py (forked from LantaoYu/SeqGAN)
import numpy as np
import random
import model_settings
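
# Batching helpers used in SeqGAN-style training:
#   Gen_Data_loader -- batches fixed-length token sequences from the real-data file.
#   Dis_dataloader  -- mixes real (positive) and generated (negative) sequences with
#                      one-hot labels for discriminator training, and can draw small
#                      reference sets of real samples.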


class Gen_Data_loader():
    def __init__(self, batch_size):
        self.batch_size = batch_size
        self.token_stream = []

    def create_batches(self, data_file):
        # The number of real sequences is stored in a module-level global so that
        # Dis_dataloader.load_train_data can subsample the same file later.
        global pos_size
        self.token_stream = []
        with open(data_file, 'r') as f:
            for line in f:
                line = line.strip()
                line = line.split()
                parse_line = [int(x) for x in line]
                # Keep only sequences of exactly seq_len tokens
                if len(parse_line) == model_settings.seq_len:
                    self.token_stream.append(parse_line)

        self.num_batch = int(len(self.token_stream) / self.batch_size)
        self.data_size = len(self.token_stream)
        pos_size = self.data_size
        # Drop the tail so the stream splits evenly into num_batch batches
        self.token_stream = self.token_stream[:self.num_batch * self.batch_size]
        self.sequence_batch = np.split(np.array(self.token_stream), self.num_batch, 0)
        self.pointer = 0

    def next_batch(self):
        ret = self.sequence_batch[self.pointer]
        self.pointer = (self.pointer + 1) % self.num_batch
        return ret

    def reset_pointer(self):
        self.pointer = 0


class Dis_dataloader():
    def __init__(self, batch_size, ref_size=None):
        self.batch_size = batch_size
        if ref_size is not None:
            self.ref_size = ref_size
        else:
            self.ref_size = 16
        self.sentences = np.array([])
        self.labels = np.array([])

    def load_train_data(self, positive_file, negative_file):
        # Load data. pos_size is set by Gen_Data_loader.create_batches, which must be
        # called first; otherwise this raises NameError.
        global pos_size
        positive_examples = []
        negative_examples = []
        with open(positive_file) as fin:
            for line in fin:
                # Randomly subsample roughly 10000 positive sequences; the positive
                # file is assumed to contain only seq_len-token lines.
                if (random.random() * pos_size) < 10000:
                    line = line.strip()
                    line = line.split()
                    parse_line = [int(x) for x in line]
                    positive_examples.append(parse_line)
        with open(negative_file) as fin:
            for line in fin:
                line = line.strip()
                line = line.split()
                parse_line = [int(x) for x in line]
                if len(parse_line) == model_settings.seq_len:
                    negative_examples.append(parse_line)
        self.sentences = np.array(positive_examples + negative_examples)
        self.positive_examples = positive_examples

        # Generate labels: [0, 1] for real data, [1, 0] for generated data
        positive_labels = [[0, 1] for _ in positive_examples]
        negative_labels = [[1, 0] for _ in negative_examples]
        self.labels = np.concatenate([positive_labels, negative_labels], 0)

        # Shuffle the data
        shuffle_indices = np.random.permutation(np.arange(len(self.labels)))
        self.sentences = self.sentences[shuffle_indices]
        self.labels = self.labels[shuffle_indices]

        # Split batches, dropping the tail that does not fill a full batch
        self.num_batch = int(len(self.labels) / self.batch_size)
        self.sentences = self.sentences[:self.num_batch * self.batch_size]
        self.labels = self.labels[:self.num_batch * self.batch_size]
        self.sentences_batches = np.split(self.sentences, self.num_batch, 0)
        self.labels_batches = np.split(self.labels, self.num_batch, 0)
        self.pointer = 0

    def get_reference(self):
        # Sample ref_size positive sequences (with replacement) as a reference set
        ref_samples = []
        for _ in range(self.ref_size):
            ref_samples.append(self.positive_examples[random.randint(0, len(self.positive_examples) - 1)])
        return np.array(ref_samples)

    def next_batch(self):
        ret = self.sentences_batches[self.pointer], self.labels_batches[self.pointer], self.get_reference()
        self.pointer = (self.pointer + 1) % self.num_batch
        return ret

    def reset_pointer(self):
        self.pointer = 0
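

# --- Hedged usage sketch (not part of the original file) ----------------------
# A minimal sketch of how these loaders are typically wired together in
# SeqGAN-style training. The file paths, batch size, and ref_size below are
# illustrative assumptions, not values taken from this repository; only the
# loader classes above are real.
if __name__ == '__main__':
    BATCH_SIZE = 64
    positive_file = 'save/real_data.txt'         # assumed path: real token sequences
    negative_file = 'save/generator_sample.txt'  # assumed path: generated sequences

    gen_loader = Gen_Data_loader(BATCH_SIZE)
    gen_loader.create_batches(positive_file)     # must run first: it sets pos_size
    real_batch = gen_loader.next_batch()         # shape: (BATCH_SIZE, seq_len)

    dis_loader = Dis_dataloader(BATCH_SIZE, ref_size=16)
    dis_loader.load_train_data(positive_file, negative_file)
    for _ in range(dis_loader.num_batch):
        x_batch, y_batch, ref_batch = dis_loader.next_batch()
        # x_batch: token sequences, y_batch: one-hot labels, ref_batch: real samples
    dis_loader.reset_pointer()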