calc_cca.py
# -*- coding: utf-8 -*-
"""
Calculating CCA (Canonical Correlation Analysis) over the produced and ground truth gestures

@author: Taras Kucherenko
"""
import argparse
import glob
import os

import numpy as np

from cca import calculate_CCA_score, find_CCA_scaling_vectors
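
# NOTE: the `cca` helper module itself is not shown in this file. Purely as an
# illustration (an assumption about its behaviour, not the project's actual
# implementation), the two helpers imported above could be approximated with
# scikit-learn's CCA roughly like this:
#
#     from sklearn.cross_decomposition import CCA
#
#     def find_CCA_scaling_vectors(pred_frames, gt_frames, n_components=1):
#         # fit one CCA model on all frames of the given condition
#         cca = CCA(n_components=n_components)
#         cca.fit(pred_frames, gt_frames)
#         return cca
#
#     def calculate_CCA_score(gt_coords, pred_coords, cca_model):
#         # project both sequences and correlate the first canonical pair
#         gt_c, pred_c = cca_model.transform(gt_coords, pred_coords)
#         return np.corrcoef(gt_c[:, 0], pred_c[:, 0])[0, 1]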


def shorten(arr_one, arr_two):
    """
    Make sure that two arrays have the same length

    Args:
        arr_one: array one
        arr_two: array two

    Returns:
        shortened versions of both arrays
    """
    min_len = min(arr_one.shape[0], arr_two.shape[0])

    arr_one = arr_one[:min_len]
    arr_two = arr_two[:min_len]

    return arr_one, arr_two


def save_result(lines, out_dir):
    """Write the computed measure to a CSV file

    Args:
        lines: list of strings to be written
        out_dir: output directory
    """
    # Make the output directory if needed
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    outname = os.path.join(out_dir, 'cca.csv')
    with open(outname, 'w') as out_file:
        out_file.writelines(lines)


def evaluate_folder(cond_name, coords_dir):
    """
    Calculate the numerical measure for the coordinates in the given folder

    Args:
        cond_name: name of the condition / folder to evaluate
        coords_dir: folder where all the data for the current model is stored

    Returns:
        nothing, prints out the metric results
    """
    cond_dir = os.path.join(coords_dir, cond_name)
    gt_dir = os.path.join(coords_dir, "GT")

    generated_files = sorted(glob.glob(os.path.join(cond_dir, '*.npy')))
    gt_files = sorted(glob.glob(os.path.join(gt_dir, '*.npy')))

    # First - find the CCA scaling vectors using all the data for the given model
    all_predicted_frames = []
    all_ground_tr_frames = []

    for predicted_file, gt_file in zip(generated_files, gt_files):

        # read and flatten the predicted values
        predicted_coords = np.load(predicted_file)
        predicted_coords = np.reshape(predicted_coords, (predicted_coords.shape[0], -1))

        # read and flatten the ground truth values
        original_coords = np.load(gt_file)
        original_coords = np.reshape(original_coords, (original_coords.shape[0], -1))

        # make sure sequences have the same length
        predicted_coords, original_coords = shorten(predicted_coords, original_coords)

        if len(all_predicted_frames) != 0:
            all_predicted_frames = np.concatenate((all_predicted_frames, predicted_coords), axis=0)
            all_ground_tr_frames = np.concatenate((all_ground_tr_frames, original_coords), axis=0)
        else:
            all_predicted_frames = predicted_coords
            all_ground_tr_frames = original_coords

    # find the CCA model
    cca_model = find_CCA_scaling_vectors(all_predicted_frames, all_ground_tr_frames)

    # calculate the Global CCA value
    # global_cca_value = calculate_CCA_score(all_predicted_frames, all_ground_tr_frames, cca_model)
    # print('{:s} Global CCA value: {:.5f}'.format(cond_name, global_cca_value))

    # calculate the CCA value for each sequence
    predicted_out_lines = [','.join(['file', 'CCA']) + '\n']
    predicted_errors = []

    for predicted_file, gt_file in zip(generated_files, gt_files):

        # read and flatten the predicted values
        predicted_coords = np.load(predicted_file)
        predicted_coords = np.reshape(predicted_coords, (predicted_coords.shape[0], -1))

        # read and flatten the ground truth values
        original_coords = np.load(gt_file)
        original_coords = np.reshape(original_coords, (original_coords.shape[0], -1))

        # make sure sequences have the same length
        predicted_coords, original_coords = shorten(predicted_coords, original_coords)

        # calculate the CCA value for this sequence
        current_cca_value = calculate_CCA_score(original_coords, predicted_coords, cca_model)
        predicted_errors.append(current_cca_value)

        basename = os.path.basename(predicted_file)
        predicted_line = basename + ',' + str(current_cca_value) + '\n'
        predicted_out_lines.append(predicted_line)

    # append the average over all sequences
    error_avgs = np.mean(predicted_errors, axis=0)
    error_stds = np.std(predicted_errors, axis=0)
    predicted_average_line = 'Average,' + str(error_avgs)
    predicted_out_lines.append(predicted_average_line)

    predicted_out_dir = os.path.join("result", cond_name)
    save_result(predicted_out_lines, predicted_out_dir)

    print('{:s}: {:.2f} +- {:.2f}'.format(cond_name, np.mean(predicted_errors), error_stds))


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Calculate prediction errors')
    parser.add_argument('--coords_dir', '-c', default='data',
                        help='Predicted gesture directory')
    args = parser.parse_args()

    # Make sure that the data is stored in the correct folder
    if not os.path.isdir(args.coords_dir) or not os.listdir(args.coords_dir):
        print("--coords_dir argument is wrong: there is no data in the folder '{}'".format(args.coords_dir))
        exit(-1)

    print('CCA:')

    for cond_name in os.listdir(args.coords_dir):
        evaluate_folder(cond_name, args.coords_dir)
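
# Example usage (folder names are illustrative; any layout matching the
# structure read by this script works):
#
#   data/
#     GT/       seq_001.npy  seq_002.npy  ...   (ground-truth coordinates)
#     model_A/  seq_001.npy  seq_002.npy  ...   (coordinates produced by a model)
#
#   python calc_cca.py --coords_dir data
#
# For each condition the per-sequence CCA scores and their average are written
# to result/<condition>/cca.csv, and "condition: mean +- std" is printed.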