"""Minimal Gradio demo exposing chat / register / login / name endpoints.

NOTE(review): accounts and names live in plain in-memory dicts — data is lost
on restart and passwords are stored in plaintext. Demo use only.
"""
from threading import Thread
import gradio as gr
import inspect
from gradio import routes
from typing import List, Type
import requests, os, re, asyncio, queue
import math
import time
import datetime
import json
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# NOTE(review): get_event_loop() is deprecated outside a running loop since
# Python 3.10, and `loop` is never used below — confirm before removing.
loop = asyncio.get_event_loop()

# Monkey patch: replace gradio's route type-introspection with a version that
# parses component docstrings line-by-line (input -> second line, output ->
# last line) so /api docs render for these components.
def get_types(cls_set: List[Type], component: str):
    """Extract (description, type) pairs from component class docstrings.

    component: "input" reads the second docstring line, anything else reads
    the last line. Returns (docset, types) as parallel lists of strings.
    """
    docset = []
    types = []
    # The lines parsed look like "name (type): description"; we take the text
    # after the last ":" as the description and the text inside "(...)" as type.
    if component == "input":
        for cls in cls_set:
            doc = inspect.getdoc(cls)
            doc_lines = doc.split("\n")
            docset.append(doc_lines[1].split(":")[-1])
            types.append(doc_lines[1].split(")")[0].split("(")[-1])
    else:
        for cls in cls_set:
            doc = inspect.getdoc(cls)
            doc_lines = doc.split("\n")
            docset.append(doc_lines[-1].split(":")[-1])
            types.append(doc_lines[-1].split(")")[0].split("(")[-1])
    return docset, types


routes.get_types = get_types

# App state: in-memory "databases", seeded with one demo account.
account_list = dict()   # user_id -> password (plaintext — demo only)
account_list['id'] = "pass"

name_list = dict()      # user_id -> display name
name_list['id'] = 'name'


def chat(x):
    """Placeholder chat handler; always returns a canned response."""
    return "AI 응답입니다."


def register(user_id, password):
    """Create an account. Returns "exist" if the id is taken, else "ok".

    Fix: the original used the keyword `pass` as a parameter name, which is a
    SyntaxError. Gradio calls handlers positionally, so renaming is safe.
    """
    if user_id in account_list:
        return "exist"
    account_list[user_id] = password
    return "ok"


def login(user_id, password):
    """Check credentials. Returns "login", "password error", or "no id"."""
    if user_id not in account_list:
        return "no id"
    if account_list[user_id] == password:
        return "login"
    return "password error"


def add_name(user_id, name):
    """Set (or overwrite) the display name for an id. Always returns "ok"."""
    name_list[user_id] = name
    return "ok"


def get_name(user_id):
    """Look up the display name for an id, or "no id" if absent.

    Fix: the original returned name_list['id'] (the literal seed key), so every
    lookup answered 'name' regardless of the requested id.
    """
    if user_id in name_list:
        return name_list[user_id]
    return "no id"


with gr.Blocks() as demo:
    count = 0
    aa = gr.Interface(
        fn=chat,
        inputs=["text"],
        outputs="text",
        description="call",
    )
    rr = gr.Interface(
        fn=register,
        inputs=["text", "text"],
        outputs="text",
        description="call",
    )
    ll = gr.Interface(
        fn=login,
        inputs=["text", "text"],
        outputs="text",
        description="call",
    )
    ad = gr.Interface(
        fn=add_name,
        inputs=["text", "text"],
        outputs="text",
        description="call",
    )
    nn = gr.Interface(
        fn=get_name,
        inputs=["text"],
        outputs="text",
        description="call",
    )

# NOTE(review): enable_queue= is redundant with .queue() and was removed in
# Gradio 4.x — confirm the installed Gradio version before dropping it.
demo.queue(max_size=32).launch(enable_queue=True)