import json

import requests
from langchain_cohere import ChatCohere
from langchain_core.prompts import ChatPromptTemplate
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_groq import ChatGroq
from langchain_openai import ChatOpenAI


def langchainConversation(conversation):
    """Convert a list of {'role', 'context'} dicts into formatted LangChain chat messages."""
    prompts = []
    for message in conversation:
        prompts.append((message['role'], message['context']))
    chat_template = ChatPromptTemplate.from_messages(prompts)
    return chat_template.format_messages()


def segmind_input_parser(messages):
    """Rename the 'context' key to 'content' so the payload matches the OpenAI-style chat schema."""
    toreturn = []
    for message in messages:
        toreturn.append({'role': message['role'], 'content': message['context']})
    return toreturn


def segmind_output_parser(response):
    """Reshape a Segmind chat completion into the AIMessage-style JSON string used by the other providers."""
    return json.dumps({
        "content": response['choices'][0]['message']['content'],
        "additional_kwargs": {},
        "response_metadata": {},
        "type": "ai",
        "name": None,
        "id": response['id'],
        "example": False,
        "tool_calls": [],
        "invalid_tool_calls": [],
        "usage_metadata": {
            "input_tokens": response['usage']['prompt_tokens'],
            "output_tokens": response['usage']['completion_tokens'],
            "total_tokens": response['usage']['total_tokens'],
        },
    }, indent=4)


def converse(conversation, provider, model, key, other: dict = {}):
    """Route the conversation to the selected provider and return the assistant reply.

    Most providers return a JSON string; Cloudflare returns a plain dict.
    'other' is reserved for provider-specific options and is currently unused.
    """
    if provider == 'groq':
        chat = ChatGroq(temperature=0, groq_api_key=key, model_name=model)
    elif provider == 'gemini':
        chat = ChatGoogleGenerativeAI(model=model, google_api_key=key)
    elif provider == 'cohere':
        chat = ChatCohere(model=model, cohere_api_key=key)
    elif provider == 'lepton':
        # Lepton exposes an OpenAI-compatible endpoint per deployed model.
        url = f'https://{model}.lepton.run/api/v1/'
        print(url)
        chat = ChatOpenAI(openai_api_base=url, openai_api_key=key)
    elif provider == 'cloudflare':
        # The key is expected in the form '<account_id>~<api_token>'.
        try:
            account_id = key.split('~')[0]
            api_token = key.split('~')[1]
        except IndexError:
            raise Exception('Invalid account ID or API token')
        API_BASE_URL = f"https://api.cloudflare.com/client/v4/accounts/{account_id}/ai/run/"
        headers = {"Authorization": f"Bearer {api_token}"}

        def run(model, inputs):
            payload = {"messages": inputs}
            response = requests.post(f"{API_BASE_URL}{model}", headers=headers, json=payload)
            return response.json()

        inputs = segmind_input_parser(conversation)
        output = run(model, inputs)
        print(output)
        # Cloudflare responses are returned as a dict rather than a JSON string.
        return {'content': output['result']['response']}
    elif provider == 'openrouter':
        chat = ChatOpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key=key,
            model=model
        )
    elif provider == 'segmind':
        url = f"https://api.segmind.com/v1/{model}"

        # Request payload
        data = {
            "messages": segmind_input_parser(conversation)
        }

        response = requests.post(url, json=data, headers={'x-api-key': key})
        output = json.loads(response.text)
        print(json.dumps(output, indent=4))
        return segmind_output_parser(output)
    else:
        return json.dumps({'content': 'unsupported provider'})
    # All LangChain-backed providers share the same invocation path.
    return json.dumps(json.loads(chat.invoke(langchainConversation(conversation)).json()), indent=4)
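

# Usage sketch (hypothetical): the provider name, model ID, and API key below are
# illustrative placeholders, not part of this module. Conversations are lists of
# {'role', 'context'} dicts, matching what langchainConversation() and
# segmind_input_parser() expect.
if __name__ == "__main__":
    sample_conversation = [
        {'role': 'system', 'context': 'You are a concise assistant.'},
        {'role': 'user', 'context': 'Say hello in one sentence.'},
    ]
    # Replace 'YOUR_GROQ_API_KEY' with a real key; any supported provider is called the same way.
    print(converse(sample_conversation, 'groq', 'llama3-8b-8192', 'YOUR_GROQ_API_KEY'))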