-
Notifications
You must be signed in to change notification settings - Fork 9
/
run_batch.py
168 lines (137 loc) · 5.38 KB
/
run_batch.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
from mesa.batchrunner import BatchRunner
import argparse
import matplotlib.pyplot as plt
import time
from datetime import timedelta
import sys
import os
import pandas as pd
import pickle
from fire_evacuation.model import FireEvacuation
from fire_evacuation.agent import Human
# Absolute path of the directory containing this script.
DIR = os.path.dirname(os.path.realpath(__file__))
# All batch results (pickled dataframes) and graphs are written under here.
OUTPUT_DIR = DIR + "/output"
# Inclusive bounds of the collaboration-percentage sweep (stepped by 10 below).
MIN_COLLABORATION = 0
MAX_COLLABORATION = 100
# Figure output settings: figsize is computed as pixels / dpi.
GRAPH_DPI = 100
GRAPH_WIDTH = 1920
GRAPH_HEIGHT = 1080
# NOTE(review): this bare string is not a real module docstring (it is not the
# first statement in the file), so it is a no-op at runtime; kept as-is.
"""
This script starts a batch run of our model, without visualisation, for obtaining the final statistics we require, over multiple iterations
"""
# Concatenate all of the dataframe files found in the OUTPUT_DIR
def merge_dataframes(directory=None):
    """Concatenate every pickled results dataframe found in *directory*.

    Parameters
    ----------
    directory : str, optional
        Folder to scan for files whose name contains ``"dataframe_"``.
        Defaults to ``OUTPUT_DIR + "/batch_results/"`` (original behaviour).

    Returns
    -------
    tuple
        ``(merged, count)`` — a single dataframe with a fresh index built
        from all found files, and how many files were merged.
        ``(None, None)`` when no dataframe files exist.
    """
    if directory is None:
        directory = OUTPUT_DIR + "/batch_results/"
    dataframe_files = [
        f
        for f in os.listdir(directory)
        if os.path.isfile(os.path.join(directory, f)) and "dataframe_" in f
    ]
    if not dataframe_files:
        return None, None
    print("Merging these dataframes:", dataframe_files)
    dataframes = []
    for f in dataframe_files:
        # Context manager closes each handle promptly; the original leaked
        # the file object returned by open().
        with open(os.path.join(directory, f), "rb") as fh:
            dataframes.append(pickle.load(fh))
    # ignore_index=True discards each file's original row index so the merged
    # frame gets one continuous index.
    return pd.concat(dataframes, ignore_index=True), len(dataframes)
# Command-line interface: both arguments are required positional ints.
parser = argparse.ArgumentParser()
parser.add_argument("runs", help="Number of repeat runs to do for each parameter setup.", type=int)
parser.add_argument("human_count", help="Number of humans in the simulation.", type=int)
# argparse handles invalid input itself: it prints the usage message and
# raises SystemExit, which subclasses BaseException, NOT Exception — so the
# original `try/except Exception` around this call was unreachable dead code
# and has been removed. Observable behaviour is unchanged.
args = parser.parse_args()
# Preserve the original truthiness fallback: a value of 0 silently becomes 1.
runs = args.runs if args.runs else 1
human_count = args.human_count if args.human_count else 1
# Parameters held constant across every simulation in the batch.
fixed_params = {
    "floor_plan_file": "floorplan_testing.txt",
    "human_count": human_count,
    "fire_probability": 0.8,
    "visualise_vision": False,
    "random_spawn": True,
    "save_plots": True,
}

# Sweep the collaboration percentage from MIN to MAX (inclusive) in steps of 10.
collaboration_range = range(MIN_COLLABORATION, MAX_COLLABORATION + 1, 10)
variable_params = {"collaboration_percentage": collaboration_range}


def _percentage_escaped(model):
    """Model reporter: share of humans (0-100) that escaped by the end of a run."""
    escaped = FireEvacuation.count_human_status(model, Human.Status.ESCAPED)
    return (escaped / model.human_count) * 100


# Collected once per model run by the batch runner.
model_reporter = {"PercentageEscaped": _percentage_escaped}

print(
    "Running batch test with %i runs for each parameter and %i human agents." % (runs, human_count)
)
batch_start = time.time()  # Wall-clock timer for the whole batch
# Each of the 'runs' iterations performs a full parameter sweep and persists
# its results dataframe, producing one dataset (and timestamp) per iteration.
for iteration in range(1, runs + 1):
    iteration_start = time.time()  # Wall-clock timer for this iteration
    runner = BatchRunner(
        FireEvacuation,
        variable_parameters=variable_params,
        fixed_parameters=fixed_params,
        model_reporters=model_reporter,
    )
    runner.run_all()  # Execute every parameter combination
    iteration_end = time.time()
    end_timestamp = time.strftime("%Y%m%d-%H%M%S")
    # Save the dataframe to a file so we have the opportunity to concatenate
    # separate dataframes from separate runs
    dataframe = runner.get_model_vars_dataframe()
    pickle_path = OUTPUT_DIR + "/batch_results/dataframe_" + end_timestamp + ".pickle"
    dataframe.to_pickle(path=pickle_path)
    elapsed = iteration_end - iteration_start  # Elapsed time in seconds
    print("Batch runner finished iteration %i. Took: %s" % (iteration, str(timedelta(seconds=elapsed))))
# Merge every saved dataframe (including ones from previous invocations of
# this script) into a single dataset for plotting.
dataframe, count = merge_dataframes()
if dataframe is None:
    # Should not happen (the loop above always saves at least one file), but
    # fail with a clear message rather than a TypeError on the del below.
    sys.exit("No result dataframes found in " + OUTPUT_DIR + "/batch_results/")
del dataframe["Run"]  # The per-file run id is meaningless after merging
# NOTE: the original called dataframe.groupby("collaboration_percentage") here
# and discarded the result — groupby returns a new object and mutates nothing,
# so the call was a no-op and has been removed.

# Shared figure size in inches (pixels / dpi).
figsize = (GRAPH_WIDTH / GRAPH_DPI, GRAPH_HEIGHT / GRAPH_DPI)

# Scatter plot: every individual (collaboration %, escaped %) observation.
fig = plt.figure(figsize=figsize, dpi=GRAPH_DPI)
plt.scatter(dataframe.collaboration_percentage, dataframe.PercentageEscaped)
fig.suptitle(
    "Evacuation Success: " + str(human_count) + " Human Agents, " + str(count) + " Iterations",
    fontsize=20,
)
plt.xlabel("Percentage of Humans Collaborating (%)", fontsize=14)
plt.ylabel("Percentage Escaped (%)", fontsize=14)
plt.xticks(range(MIN_COLLABORATION, MAX_COLLABORATION + 1, 10))
plt.ylim(0, 100)
plt.savefig(
    OUTPUT_DIR + "/batch_graphs/batch_run_scatter_" + end_timestamp + ".png", dpi=GRAPH_DPI
)
plt.close(fig)

# Box plot: distribution of escape percentages per collaboration level.
fig = plt.figure(figsize=figsize, dpi=GRAPH_DPI)
ax = fig.gca()
dataframe.boxplot(
    ax=ax,
    column="PercentageEscaped",
    by="collaboration_percentage",
    positions=list(collaboration_range),
    figsize=figsize,
    showmeans=True,
)
fig.suptitle(
    "Evacuation Success: " + str(human_count) + " Human Agents, " + str(count) + " Iterations",
    fontsize=20,
)
plt.xlabel("Percentage of Humans Collaborating (%)", fontsize=14)
plt.ylabel("Percentage Escaped (%)", fontsize=14)
plt.xticks(collaboration_range)
plt.ylim(0, 100)
plt.savefig(
    OUTPUT_DIR + "/batch_graphs/batch_run_boxplot_" + end_timestamp + ".png", dpi=GRAPH_DPI
)
plt.close(fig)

batch_end = time.time()
elapsed = batch_end - batch_start  # Elapsed wall-clock seconds for the whole batch
print("Batch runner finished all iterations. Took: %s" % str(timedelta(seconds=elapsed)))