Spaces:
Runtime error
Runtime error
File size: 1,875 Bytes
478ada8 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 |
from transformers import AutoTokenizer, AutoModelForCausalLM
import requests
import gradio as gr
# Load the model and tokenizer from Hugging Face.
# NOTE: runs at import time — first launch downloads several GB of weights
# and loads them into memory before the app can start serving.
tokenizer = AutoTokenizer.from_pretrained("LaierTwoLabsInc/Satoshi-7B")
model = AutoModelForCausalLM.from_pretrained("LaierTwoLabsInc/Satoshi-7B")
# Function to fetch BTC price from CoinGecko API
def fetch_btc_price():
    """Fetch the current Bitcoin price in USD from the CoinGecko API.

    Returns:
        The USD price (int or float as returned by the API), or ``None``
        when the request fails, times out, returns a non-200 status, or
        the response body cannot be parsed.
    """
    url = "https://api.coingecko.com/api/v3/simple/price"
    params = {'ids': 'bitcoin', 'vs_currencies': 'usd'}
    try:
        # timeout= keeps a stalled API from hanging the Gradio worker forever
        response = requests.get(url, params=params, timeout=10)
        if response.status_code == 200:
            data = response.json()
            return data['bitcoin']['usd']
    except (requests.RequestException, ValueError, KeyError):
        # Network errors, bad JSON, or an unexpected payload shape are all
        # treated the same as a bad status code: no price available.
        pass
    return None
# Function to generate a response based on the prompt
def generate_custom_response(prompt):
    """Generate a completion for *prompt* with the loaded causal LM.

    Args:
        prompt: Free-form text to feed the model.

    Returns:
        The decoded model output (which includes the prompt text, since the
        full sequence is decoded) with special tokens stripped.
    """
    # Encode the input prompt
    inputs = tokenizer(prompt, return_tensors="pt")
    # Pass the attention mask explicitly (transformers warns without it and
    # may mis-handle the input), and bound only the *generated* length with
    # max_new_tokens — max_length counts prompt + completion, so a long
    # prompt would otherwise leave no room for the answer.
    outputs = model.generate(
        inputs['input_ids'],
        attention_mask=inputs['attention_mask'],
        max_new_tokens=200,
        num_return_sequences=1,
    )
    # Decode the generated response
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
# Function to fetch BTC price and generate analysis based on the prompt
def btc_analysis(prompt):
    """Fetch the live BTC price and ask the model to analyze it.

    Args:
        prompt: The user's question, appended after the price statement.

    Returns:
        The model's analysis text, or an error message when the price
        could not be fetched.
    """
    btc_price = fetch_btc_price()
    # Compare against None explicitly: a numeric price of 0 is falsy and
    # would otherwise be misreported as a fetch failure.
    if btc_price is not None:
        full_prompt = f"Bitcoin's current price is ${btc_price}. {prompt}"
        ai_response = generate_custom_response(full_prompt)
        return ai_response
    return "Error fetching Bitcoin price."
# Gradio UI: one prompt textbox in, the model's analysis text out.
prompt_box = gr.Textbox(
    value="What does this price mean for investors and the market?",
    label="Prompt",
)
interface = gr.Interface(
    fn=btc_analysis,
    inputs=prompt_box,
    outputs="text",
    title="Bitcoin Price Analysis",
    description=(
        "Fetch Bitcoin's current price and get analysis based on the "
        "provided prompt using Hugging Face's model."
    ),
)

# Start serving the web app.
interface.launch()
|