tanveeshsingh committed on
Commit
d3feef7
1 Parent(s): 0ce1955

Changes for Collinear SDK

Browse files
Files changed (1) hide show
  1. app.py +17 -55
app.py CHANGED
@@ -1,65 +1,27 @@
1
  import gradio as gr
2
  from collinear import Collinear
3
- conv_template = Template(
4
- """
5
- # Context:
6
- {{ document }}
7
-
8
- # Claim:
9
- {% for message in conversation %}
10
- {{ message.role }}: {{ message.content }}
11
- {% endfor %}
12
- """
13
- )
14
 
15
- qa_template = Template(
16
- """
17
- # Context:
18
- {{ document }}
19
-
20
- # Claim:
21
- user: {{ question }}
22
- assistant: {{ answer }}
23
- """
24
- )
25
-
26
- nli_template = Template(
27
- """
28
- # Context:
29
- {{ document }}
30
-
31
- # Claim:
32
- assistant: {{ claim }}
33
- """
34
- )
35
-
36
-
37
- # Function to dynamically update inputs based on the input style
38
  def update_inputs(input_style):
39
- # if input_style == "Conv":
40
- # return gr.update(visible=True), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
41
- # elif input_style == "NLI":
42
- # return gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
43
- # elif input_style == "QA format":
44
- # return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)
45
 
46
 
47
  # Function to judge reliability based on the selected input format
48
- def judge_reliability(input_style, document, conversation, claim, question, answer):
49
- # with torch.no_grad():
50
- # if input_style == "Conv":
51
- # conversation = json.loads(conversation)
52
- # text = conv_template.render(document=document, conversation=conversation)
53
- # elif input_style == "NLI":
54
- # text = nli_template.render(document=document, claim=claim)
55
- # elif input_style == "QA format":
56
- # text = qa_template.render(document=document, question=question, answer=answer)
57
-
58
- # print(text)
59
-
60
- # outputs = model_pipeline(text)
61
- # results = f"Reliability Judge Outputs: {outputs}"
62
- # return results
63
 
64
 
65
 
 
1
  import gradio as gr
2
  from collinear import Collinear
3
+ import os
4
+ collinear = Collinear(access_token=os.getenv('COLLINEAR_API_KEY'))
 
 
 
 
 
 
 
 
 
5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  def update_inputs(input_style):
7
+ if input_style == "Conv":
8
+ return gr.update(visible=True), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=False)
9
+ elif input_style == "NLI":
10
+ return gr.update(visible=True), gr.update(visible=False), gr.update(visible=True), gr.update(visible=False), gr.update(visible=False)
11
+ elif input_style == "QA format":
12
+ return gr.update(visible=True), gr.update(visible=False), gr.update(visible=False), gr.update(visible=True), gr.update(visible=True)
13
 
14
 
15
  # Function to judge reliability based on the selected input format
16
+ async def judge_reliability(input_style, document, conversation, claim, question, answer):
17
+ if input_style == "Conv":
18
+ outputs= await collinear.judge.veritas.conversation(document,conversation[:-1],conversation[-1])
19
+ elif input_style == "NLI":
20
+ outputs = await collinear.judge.veritas.natural_language_inference(document,claim)
21
+ elif input_style == "QA format":
22
+ outputs = await collinear.judge.veritas.question_answer(document,question,answer)
23
+ results = f"Reliability Judge Outputs: {outputs}"
24
+ return results
 
 
 
 
 
 
25
 
26
 
27