"""Gradio demo that classifies HTTP requests as AI-generated traffic or not."""

import os

import gradio as gr
from transformers import pipeline

# Fall back to True when HF_ACCESS_TOKEN is unset or empty: transformers
# interprets use_auth_token=True as "use the locally cached Hugging Face token".
auth_token = os.environ.get("HF_ACCESS_TOKEN") or True

pipe = pipeline(model="fmops/ai-traffic-classifier", use_auth_token=auth_token)

# Map the model's raw label ids to human-readable class names.
id2label = {"LABEL_0": "not ai traffic", "LABEL_1": "suspected ai traffic"}


def predict(path, method, content):
    """Score an HTTP request for AI-generated traffic.

    Args:
        path: Request path, e.g. "/login".
        method: HTTP method, e.g. "POST".
        content: Raw request body as a string (may be empty).

    Returns:
        Dict mapping each human-readable label to the model's confidence score,
        in the shape Gradio's "label" output component expects.
    """
    # Serialize the request fields into the flat text prompt the model was
    # trained on; truncation guards against over-long request bodies.
    prompt = f"""
path: {path}
method: {method}
content: {content}
"""
    return {id2label[x["label"]]: x["score"] for x in pipe(prompt, truncation=True)}


with gr.Blocks() as demo:
    gr.Markdown(
        """
        # AI Traffic Classifier
        This is a demo of the AI traffic classifier.
        """
    )
    # Rendered inline inside the Blocks context; the instance itself is not
    # referenced again, so no name binding is needed.
    gr.Interface(
        fn=predict,
        inputs=["text", "text", "text"],
        examples=[
            ["/login", "POST", ""],
            [
                "/backend-api/conversation",
                "POST",
                """
{"action":"next","messages":[{"id":"aaa229d6-f97d-427c-b7bb-0e6276079c95","author":{"role":"user"},"content":{"content_type":"text","parts":["Write a long poem"]},"metadata":{}}],"conversation_id":"395edb51-5cb3-432f-a142-d87c160d403b","parent_message_id":"535f59f0-ed0f-4d9f-8e4b-4b5c0834833b","model":"text-davinci-002-render-sha","timezone_offset_min":420,"suggestions":[],"history_and_training_disabled":true,"arkose_token":null}""",
            ],
            [
                "/api/chat",
                "POST",
                """{"text":"How are you aware of GPT-3? There must have been some data leakage..."}""",
            ],
        ],
        outputs="label",
    )

# Guard the launch so importing this module (e.g. in tests) does not start a server.
if __name__ == "__main__":
    demo.launch()