# test.py
import tensorflow as tf
from glob import glob
import numpy as np
input_size = (480, 640)
ip_files = glob('../Corrected/input/*jpg')
sr_files = glob('../Corrected/raw/*png')
op_files = glob('../Corrected/target/*jpg')
X = []
Y = []
for x, y, z in zip(ip_files, op_files, sr_files):
    x = tf.keras.preprocessing.image.load_img(x, target_size=input_size)
    y = tf.keras.preprocessing.image.load_img(y, target_size=input_size)
    z = tf.keras.preprocessing.image.load_img(z, target_size=input_size)
    # Keep a single channel from the input and raw images and scale to [0, 1]
    x = np.asarray(x)[:, :, :1] / 255.
    z = np.asarray(z)[:, :, :1] / 255.
    # Stack the two channels into one 2-channel training sample
    x = np.concatenate([x, z], axis=2)
    X.append(x)
    # Single-channel target, also scaled to [0, 1]
    Y.append(np.asarray(y)[:, :, :1] / 255.)
X = np.array(X)
Y = np.array(Y)
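# Optional sanity check (not in the original script, just an illustrative
# guard): given target_size=(480, 640) and the channel handling above,
# X should be (num_samples, 480, 640, 2) and Y should be (num_samples, 480, 640, 1).
assert X.ndim == 4 and X.shape[1:] == (*input_size, 2), X.shape
assert Y.ndim == 4 and Y.shape[1:] == (*input_size, 1), Y.shape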
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(32, (3, 3), padding='same', input_shape=(*input_size, 1), activation='relu'),
    tf.keras.layers.Conv2D(32, (3, 3), padding='same', activation='relu'),
    tf.keras.layers.MaxPool2D((2, 2)),
    tf.keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu'),
    tf.keras.layers.Conv2D(64, (3, 3), padding='same', activation='relu'),
    tf.keras.layers.MaxPool2D((2, 2)),
    tf.keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu'),
    tf.keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu'),
    # tf.keras.layers.MaxPool2D((2, 2)),
    # tf.keras.layers.Conv2D(256, (3, 3), padding='same', activation='relu'),
    # tf.keras.layers.Conv2DTranspose(256, (3, 3), padding='same', activation='relu'),
    # tf.keras.layers.UpSampling2D((2, 2)),
    tf.keras.layers.Conv2DTranspose(128, (3, 3), padding='same', activation='relu'),
    tf.keras.layers.Conv2DTranspose(128, (3, 3), padding='same', activation='relu'),
    tf.keras.layers.UpSampling2D((2, 2)),
    tf.keras.layers.Conv2DTranspose(64, (3, 3), padding='same', activation='relu'),
    tf.keras.layers.Conv2DTranspose(64, (3, 3), padding='same', activation='relu'),
    tf.keras.layers.UpSampling2D((2, 2)),
    tf.keras.layers.Conv2DTranspose(32, (3, 3), padding='same', activation='relu'),
    tf.keras.layers.Conv2DTranspose(1, (3, 3), padding='same', activation='sigmoid'),
])
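# Note: the Sequential model above is a single-channel encoder/decoder baseline
# (input_shape=(*input_size, 1)); it is never compiled or trained in this script.
# The `model` variable is overwritten below by the functional model, which takes
# the 2-channel input that matches X and is the one actually fitted.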
k = 7
input_layer = tf.keras.layers.Input(shape=(*input_size, 2))
x = tf.keras.layers.Conv2D(32, (k, k), padding='same', activation='relu')(input_layer)
x1 = tf.keras.layers.MaxPool2D((2, 2))(x)
# -------------------------------------------------------
x = tf.keras.layers.Conv2D(64, (k, k), padding='same', activation='relu')(x1)
x = tf.keras.layers.Conv2D(64, (k, k), padding='same', activation='relu')(x)
x2 = tf.keras.layers.MaxPool2D((2, 2))(x)
# -------------------------------------------------------
x3 = tf.keras.layers.Conv2D(128, (k, k), padding='same', activation='relu')(x2)
x = tf.keras.layers.Conv2D(128, (k, k), padding='same', activation='relu')(x3)
# x = tf.keras.layers.MaxPool2D((2, 2))(x)
# x = tf.keras.layers.Conv2D(256, (3, 3), padding='same', activation='relu')(x)
# x = tf.keras.layers.Conv2DTranspose(256, (3, 3), padding='same', activation='relu')(x)
# x = tf.keras.layers.UpSampling2D((2, 2))(x)
x = tf.keras.layers.Conv2DTranspose(128, (k, k), padding='same', activation='relu')(x)
x = tf.keras.layers.concatenate([x, x3], axis=3)
x = tf.keras.layers.Conv2DTranspose(128, (k, k), padding='same', activation='relu')(x)
# -------------------------------------------------------
x = tf.keras.layers.concatenate([x, x2], axis=3)
x = tf.keras.layers.UpSampling2D((2, 2))(x)
x = tf.keras.layers.Conv2DTranspose(64, (k, k), padding='same', activation='relu')(x)
x = tf.keras.layers.Conv2DTranspose(64, (k, k), padding='same', activation='relu')(x)
# -------------------------------------------------------
x = tf.keras.layers.concatenate([x, x1], axis=3)
x = tf.keras.layers.UpSampling2D((2, 2))(x)
x = tf.keras.layers.Conv2DTranspose(32, (k, k), padding='same', activation='relu')(x)
x = tf.keras.layers.Conv2DTranspose(1, (k, k), padding='same', activation='sigmoid')(x)
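# The functional graph above is a U-Net-style encoder/decoder: two MaxPool
# downsampling stages, 7x7 convolutions throughout (k = 7), and skip connections
# that concatenate the encoder features x3, x2 and x1 back into the decoder
# before each upsampling step, ending in a 1-channel sigmoid probability map.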
def dice_loss2(y_true, y_pred):
    # Smoothed soft Dice loss, reduced over the last (channel) axis
    numerator = 2 * tf.reduce_sum(y_true * y_pred, axis=-1)
    denominator = tf.reduce_sum(y_true + y_pred, axis=-1)
    return 1 - (numerator + 1) / (denominator + 1)
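# dice_loss2 is defined but not used below; the model is compiled with
# binary_crossentropy. If you wanted to try the Dice loss instead, one possible
# (untested here) swap would be:
#   model.compile(optimizer='adam', loss=dice_loss2, metrics=['accuracy'])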
model = tf.keras.models.Model(inputs=input_layer, outputs=x)
model.summary()
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(X, Y, batch_size=4, epochs=500, validation_split=0.2)
y = model.predict(X[10:])
for i, j in enumerate(y):
    tf.keras.preprocessing.image.save_img(f'{i}5.jpg', j)
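# The sigmoid output saved above is a soft probability map. If binary masks are
# needed, a simple post-processing step (an assumption, not part of the original
# script, including the output filename) is to threshold before saving:
#   mask = (j > 0.5).astype('float32')
#   tf.keras.preprocessing.image.save_img(f'{i}5_mask.jpg', mask)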
# The commented-out block below stitches side-by-side comparison strips of the
# BCDU, SegCaps, original, thresholded SegCaps and manual images into one figure.
# from glob import glob
# files = glob('../retcam_caps_results_90/*.jpg')
# manual = glob('../Corrected/target/*.jpg')
# print(files)
# bcdu, segcaps, segcaps_th = [], [], []
# for i, file in enumerate(files):
#     if i % 3 == 0:
#         bcdu.append(file)
#     elif i % 3 == 1:
#         segcaps.append(file)
#     if i % 3 == 2:
#         segcaps_th.append(file)
# image_names = []
# for file in bcdu:
#     image_names.append('_'.join(file.replace('\\', '/').split('/')[-1].split('_')[:2]) + '.png')
# from PIL import Image
# import numpy as np
# images = []
# for file in image_names:
#     images.append(np.asarray(Image.open('../retcam/' + file))[:, :, :3])
# bcdu_images = []
# for file in bcdu:
#     bcdu_images.append(np.asarray(Image.open(file))[:, :, :3])
# segcaps_images = []
# for file in segcaps:
#     segcaps_images.append(np.asarray(Image.open(file))[:, :, :3])
# segcaps_th_images = []
# for file in segcaps_th:
#     segcaps_th_images.append(np.asarray(Image.open(file))[:, :, :3])
# manual_images = []
# for file in manual:
#     manual_images.append(np.asarray(Image.open(file))[:, :, :3])
# image = None
# for i in range(len(bcdu)):
#     if image is None:
#         image = np.concatenate([bcdu_images[i], segcaps_images[i], images[i], segcaps_th_images[i], manual_images[i]], axis=1)
#     else:
#         img = np.concatenate([bcdu_images[i], segcaps_images[i], images[i], segcaps_th_images[i], manual_images[i]], axis=1)
#         image = np.concatenate([image, img], axis=0)
# print(image.shape)
# Image.fromarray(image).save('res.jpg')