EasyDetect / pipeline /run_pipeline.py
sunnychenxiwang's picture
update all
24c4def
raw
history blame
2.28 kB
# import sys
# sys.path.append("/home/wcx/wcx/EasyDetect/pipeline")
from pipeline.claim_generate import *
from pipeline.query_generate import *
from pipeline.tool_execute import *
from pipeline.judge import *
from pipeline.openai_wrapper import *
class Pipeline:
    """End-to-end hallucination-detection pipeline.

    Stages (see ``run``): claim extraction -> verification-query generation
    -> tool execution over the image -> (currently disabled) judging.
    """

    def __init__(self,
                 api_key=None,
                 base_url=None,
                 prompt_dir="/home/wcx/wcx/EasyDetect/prompts",
                 cache_dir="/newdisk3/wcx/MLLM/image-to-text/cache"):
        """Build the chat clients and pipeline components.

        Args:
            api_key: OpenAI-compatible API key. Defaults to the
                ``OPENAI_API_KEY`` environment variable.
                SECURITY: a live key used to be hard-coded here and was
                committed to the repository — it must be rotated; never
                re-embed credentials in source.
            base_url: API endpoint. Defaults to ``OPENAI_BASE_URL`` env var,
                falling back to the original oneapi proxy URL.
            prompt_dir: Directory holding the three prompt YAML files
                (claim_generate / query_generate / verification).
            cache_dir: Scratch directory handed to the tool executor.
        """
        api_key = api_key or os.environ.get("OPENAI_API_KEY", "")
        base_url = base_url or os.environ.get("OPENAI_BASE_URL",
                                              "https://oneapi.xty.app/v1")
        self.cache_dir = cache_dir
        # NOTE(original author, translated): only one chat object is
        # instantiated globally — could sharing it interfere with prompt
        # results? Left as-is; confirm if cross-call contamination appears.
        self.syncchat = SyncChat(model="gpt-4-1106-preview",
                                 api_key=api_key, base_url=base_url)
        self.asyncchat = AsyncChat(model="gpt-4-1106-preview",
                                   api_key=api_key, base_url=base_url)
        self.visionchat = VisionChat(model="gpt-4-vision-preview",
                                     api_key=api_key, base_url=base_url)
        self.claim_generator = ClaimGenerator(
            prompt_path=os.path.join(prompt_dir, "claim_generate.yaml"),
            chat=self.syncchat)
        self.query_generator = QueryGenerator(
            prompt_path=os.path.join(prompt_dir, "query_generate.yaml"),
            chat=self.asyncchat, type="image-to-text")
        self.tool = Tool()
        self.judger = Judger(
            prompt_path=os.path.join(prompt_dir, "verification.yaml"),
            chat=self.visionchat, type="image-to-text")

    def run(self, text, image_path):
        """Run the detection pipeline on one (text, image) pair.

        Args:
            text: The model response to check for hallucinations.
            image_path: Path to the image the text describes.

        Returns:
            The ``"phrases"`` entry of the object-detection tool result.
            Full judging is currently disabled (see commented call below).
        """
        # The raw chat response is unused here; only the parsed claims matter.
        _, claim_list = self.claim_generator.get_response(text=text)
        objects, attribute_ques_list, scenetext_ques_list, fact_ques_list = \
            self.query_generator.get_response(claim_list=claim_list)
        object_res, attribute_res, text_res, fact_res = self.tool.execute(
            image_path=image_path,
            new_path=self.cache_dir,
            objects=objects,
            attribute_list=attribute_ques_list,
            scenetext_list=scenetext_ques_list,
            fact_list=fact_ques_list)
        # Judging stage intentionally disabled — only grounded phrases are
        # returned for now.
        # response = self.judger.get_response(object_res, attribute_res,
        #     text_res, fact_res, claim_list, image_path)
        return object_res["phrases"]