-
Notifications
You must be signed in to change notification settings - Fork 34
/
human_simulator.py
137 lines (107 loc) · 4.78 KB
/
human_simulator.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
#!python
# -*- coding: utf-8 -*-
# @author: Kun
from utils import get_content_between_a_b, parse_instructions
from prompts.human_simulator import get_input_text
from global_config import lang_opt, llm_model_opt
# Select the LLM backend wrapper at import time from the globally configured
# model name.  Each utils.*_util module binds the same name
# (get_api_response), so the rest of this file stays backend-agnostic;
# presumably all backends share the (model, tokenizer, prompt) call signature
# used below — TODO confirm against each util module.
if "openai" == llm_model_opt:
    from utils.openai_util import get_api_response
elif "vicuna" == llm_model_opt:
    from utils.vicuna_util import get_api_response
elif "chatglm" == llm_model_opt:
    from utils.chatglm_util import get_api_response
elif "baichuan" == llm_model_opt:
    from utils.baichuan_util import get_api_response
elif "aquila" == llm_model_opt:
    from utils.aquila_util import get_api_response
elif "falcon" == llm_model_opt:
    from utils.falcon_util import get_api_response
else:
    # Fail fast on an unknown configuration instead of at first API call.
    raise Exception("not supported llm model name: {}".format(llm_model_opt))
class Human:
    """Simulated human editor in the writer loop.

    Given the previous step's input/output bundle, asks the LLM to
    (a) choose one of the writer's proposed plans (``select_plan``) and
    (b) extend the paragraph and revise the plan (``step``), parsing the
    model's free-form text responses back into structured fields.
    """

    def __init__(self, input, memory, embedder, model, tokenizer):
        """Store the step context and the backing model/tokenizer.

        ``input`` is the previous step's dict (keys used here:
        "input_paragraph", "output_paragraph", "output_memory",
        "output_instruction").  NOTE: the parameter name shadows the
        ``input`` builtin but is kept for interface compatibility.
        """
        self.input = input
        # Fall back to the memory carried inside the step dict when no
        # explicit memory object is supplied.
        self.memory = memory if memory else self.input['output_memory']
        self.embedder = embedder
        self.model = model
        self.tokenizer = tokenizer
        self.output = {}

    def prepare_input(self):
        """Assemble the extension prompt from the current step context."""
        previous_paragraph = self.input["input_paragraph"]
        writer_new_paragraph = self.input["output_paragraph"]
        memory = self.input["output_memory"]
        user_edited_plan = self.input["output_instruction"]
        return get_input_text(
            lang_opt, previous_paragraph, memory, writer_new_paragraph,
            user_edited_plan)

    def parse_plan(self, response):
        """Return the text between 'Selected Plan:' and 'Reason' in the
        model response, or None when the markers are absent."""
        return get_content_between_a_b('Selected Plan:', 'Reason', response)

    def select_plan(self, response_file):
        """Ask the LLM to pick one of the three proposed plans.

        Retries until the response contains a parseable 'Selected Plan:'
        section.  Appends the raw response to ``response_file`` when given.
        Returns the selected plan text.
        """
        previous_paragraph = self.input["input_paragraph"]
        writer_new_paragraph = self.input["output_paragraph"]
        memory = self.input["output_memory"]
        previous_plans = self.input["output_instruction"]
        prompt = f"""
Now imagine you are a helpful assistant that help a novelist with decision making. You will be given a previously written paragraph and a paragraph written by a ChatGPT writing assistant, a summary of the main storyline maintained by the ChatGPT assistant, and 3 different possible plans of what to write next.
I need you to:
Select the most interesting and suitable plan proposed by the ChatGPT assistant.
Previously written paragraph:
{previous_paragraph}
The summary of the main storyline maintained by your ChatGPT assistant:
{memory}
The new paragraph written by your ChatGPT assistant:
{writer_new_paragraph}
Three plans of what to write next proposed by your ChatGPT assistant:
{parse_instructions(previous_plans)}
Now start choosing, organize your output by strictly following the output format as below:
Selected Plan:
<copy the selected plan here>
Reason:
<Explain why you choose the plan>
"""
        print(prompt+'\n'+'\n')
        response = get_api_response(self.model, self.tokenizer, prompt)
        plan = self.parse_plan(response)
        # Retry until the model emits the required markers.  NOTE(review):
        # this loops forever on a persistently malformed model; a retry cap
        # may be worth adding.
        while plan is None:
            response = get_api_response(self.model, self.tokenizer, prompt)
            plan = self.parse_plan(response)
        if response_file:
            with open(response_file, 'a', encoding='utf-8') as f:
                f.write(f"Selected plan here:\n{response}\n\n")
        return plan

    def parse_output(self, text):
        """Parse the model's extension response into structured fields.

        Returns {"output_paragraph": ..., "output_instruction": ...} on
        success, or None when the text cannot be parsed (which makes
        ``step`` retry).
        """
        try:
            lines = text.splitlines()
            if lines[0].startswith('Extended Paragraph'):
                new_paragraph = get_content_between_a_b(
                    'Extended Paragraph:', 'Selected Plan', text)
            else:
                new_paragraph = lines[0]
            # splitlines() never yields a bare '\n' element, so the '\n'
            # guards below are defensive only (kept for parity with the
            # original logic).
            if lines[-1] != '\n' and lines[-1].startswith('Revised Plan:'):
                revised_plan = lines[-1][len("Revised Plan:"):]
            elif lines[-1] != '\n':
                revised_plan = lines[-1]
            return {
                "output_paragraph": new_paragraph,
                "output_instruction": revised_plan,
            }
        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt/
            # SystemExit.  Any parse failure (e.g. empty text raising
            # IndexError) maps to None so the caller retries.
            return None

    def step(self, response_file=None):
        """Run one extension round: prompt the LLM, parse, retry on failure.

        Stores the parsed result in ``self.output`` and appends the raw
        response to ``response_file`` when given.
        """
        prompt = self.prepare_input()
        print(prompt+'\n'+'\n')
        response = get_api_response(self.model, self.tokenizer, prompt)
        self.output = self.parse_output(response)
        # Retry until the response parses.  NOTE(review): unbounded, like
        # select_plan's retry loop.
        while self.output is None:
            response = get_api_response(self.model, self.tokenizer, prompt)
            self.output = self.parse_output(response)
        if response_file:
            with open(response_file, 'a', encoding='utf-8') as f:
                f.write(f"Human's output here:\n{response}\n\n")