Hasan Iqbal committed
Commit 48d16d8 • Parent(s): adf20e0
Fixed UUID issues
README.md CHANGED

@@ -18,7 +18,7 @@ pinned: false
 <p align="center">
     <a href="https://github.com/hasaniqbal777/OpenFactCheck/actions/workflows/release-patch.yaml">
         <img src="https://img.shields.io/github/actions/workflow/status/hasaniqbal777/openfactcheck/release-patch.yaml?label=release-patch
-" alt="Release">
+" alt="Release Patch">
     </a>
     <a href="https://github.com/hasaniqbal777/OpenFactCheck/actions/workflows/release.yaml">
         <img src="https://img.shields.io/github/actions/workflow/status/hasaniqbal777/openfactcheck/release.yaml?label=release
src/openfactcheck/app/evaluate_llm.py CHANGED

@@ -93,7 +93,7 @@ def evaluate_llm(ofc: OpenFactCheck):
     st.write("Please provide the following information to be included in the leaderboard.")

     # Create text inputs to enter the user information
-    st.session_state.id =
+    st.session_state.id = llm_evaluator.run_id
     st.text_input("First Name", key="input_first_name", on_change=update_first_name)
     st.text_input("Last Name", key="input_last_name", on_change=update_last_name)
     st.text_input("Email", key="input_email", on_change=update_email)
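For context, a minimal sketch of the pattern this change establishes: the page now seeds `st.session_state.id` from the evaluator's `run_id`, so the leaderboard form submission can be tied back to a concrete evaluation run. The `render_leaderboard_form` wrapper is hypothetical, and the `on_change` callbacks are omitted; only the `st.session_state.id = llm_evaluator.run_id` line comes from the diff.

```python
# Minimal sketch, assuming an LLMEvaluator instance exposing a run_id
# attribute (introduced in evaluate.py below); the wrapper is hypothetical.
import streamlit as st

def render_leaderboard_form(llm_evaluator):
    st.write("Please provide the following information to be included in the leaderboard.")

    # Seed the session with the evaluator's run id so the submitted form
    # data can be matched to the artifacts written under that run's folder.
    st.session_state.id = llm_evaluator.run_id

    st.text_input("First Name", key="input_first_name")
    st.text_input("Last Name", key="input_last_name")
    st.text_input("Email", key="input_email")
```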
src/openfactcheck/evaluator/llm/evaluate.py CHANGED

@@ -1,5 +1,6 @@
 import os
 import json
+import uuid
 import pandas as pd
 from importlib import resources as pkg_resources

@@ -80,7 +81,7 @@ class LLMEvaluator(SnowballingEvaluator, SelfAwareEvaluator, FreshQAEvaluator, F

         # Set the attributes
         self.model_name = None
-        self.run_id =
+        self.run_id = str(uuid.uuid4().hex)
         self.input_path = None
         self.dataset_path = None
         self.output_path = None
@@ -258,18 +259,17 @@ class LLMEvaluator(SnowballingEvaluator, SelfAwareEvaluator, FreshQAEvaluator, F

         # Set the attributes
         self.model_name = model_name
-        self.run_id = "123"
         self.input_path = input_path
         self.output_path = output_path
         self.dataset_path = dataset_path
         self.datasets = datasets
-
+
         # Check if the output path is provided (if not, use the default template)
         if self.output_path == "":
             self.output_path = default_output_path

         # Check if the output path exists (if not, create it)
-        if not os.path.exists(self.output_path):
+        if not os.path.exists(f"{self.output_path}/{self.run_id}"):
             os.makedirs(f"{self.output_path}/{self.run_id}")

         # Check if the questions path is provided (if not, use the default template)
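Two things are fixed in this file. First, the hard-coded `self.run_id = "123"` is dropped in favor of a per-instance id generated in the constructor with `uuid.uuid4().hex` (the `str(...)` wrapper is a no-op, since `.hex` is already a string). Second, the directory check previously tested only `self.output_path`, so whenever that directory already existed the per-run subdirectory was never created; the fix tests the full `output_path/run_id` path. A minimal sketch of the corrected logic, with an assumed output path:

```python
# Minimal sketch of the run-directory logic after this commit; the
# "results" output path is an assumption, not taken from the repository.
import os
import uuid

run_id = uuid.uuid4().hex  # 32-char hex string, unique per evaluator instance
output_path = "results"

# Old bug: checking os.path.exists(output_path) skipped makedirs whenever
# the parent directory existed, leaving the per-run subdirectory missing.
run_dir = f"{output_path}/{run_id}"
if not os.path.exists(run_dir):
    os.makedirs(run_dir)
```

An equivalent, slightly more idiomatic form is `os.makedirs(run_dir, exist_ok=True)`, which collapses the check-then-create pair and avoids the race between the two calls.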
src/openfactcheck/evaluator/response/evaluate.py CHANGED

@@ -57,7 +57,7 @@ class ResponseEvaluator:
         """

         # Check if sample_name is provided in kwargs else generate a random one
-        sample_name = kwargs.get("sample_name", str(uuid.uuid4()
+        sample_name = kwargs.get("sample_name", str(uuid.uuid4()))

         # Initialize the state
         solver_output = FactCheckerState(question=question, response=response)
@@ -111,7 +111,7 @@ class ResponseEvaluator:

         def evaluate_response():
             # Check if sample_name is provided in kwargs else generate a random one
-            sample_name = kwargs.get("sample_name", str(uuid.uuid4()
+            sample_name = kwargs.get("sample_name", str(uuid.uuid4()))

             # Initialize the state
             solver_output = FactCheckerState(question=question, response=response)