# (scrape-artifact header removed: file size, commit hashes, and line-number gutter from the page capture)
import random
from openai import OpenAI

# Point to the local LM Studio server, which exposes an OpenAI-compatible API.
# The api_key value is a placeholder; LM Studio does not validate it.
client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio")

# Conversation history seeded with the system prompt. Every user prompt and
# assistant reply is appended, so the model sees the full dialogue each turn.
# NOTE(review): history grows without bound in the infinite loop below — for
# long runs this will eventually exceed the model's context window; consider
# truncating old turns if that matters for this test harness.
history = [
    {"role": "system", "content": "You are an intelligent assistant. You always provide well-reasoned answers that are both correct and helpful."},
]

# Read all candidate test prompts once, up front; the file handle is released
# before entering the loop.
with open('Model_Test_Issues_zh_en_jp.txt', 'r', encoding='utf-8') as file:
    lines = file.readlines()

# Loop indefinitely: pick a random prompt, send it, stream the reply.
while True:
    # Choose a random line from the file and strip the trailing newline.
    line = random.choice(lines).strip()
    print(line)
    # Add the line as the user's content to the history.
    history.append({"role": "user", "content": line})
    # Generate the response, streaming so tokens print as they arrive.
    completion = client.chat.completions.create(
        model="mod/Repository",
        messages=history,
        temperature=0.7,
        stream=True,
        stop=["### Evaluation:", "<|end_of_text|>", "Translation:"],
    )
    # Accumulate the streamed chunks into a single assistant message so the
    # full reply can be appended to the history afterwards.
    new_message = {"role": "assistant", "content": ""}
    for chunk in completion:
        if chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="", flush=True)
            new_message["content"] += chunk.choices[0].delta.content
    history.append(new_message)
    print()