|
import streamlit as st |
|
from transformers import pipeline |
|
from datasets import load_dataset |
|
|
|
|
|
model_name = "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"


# Streamlit re-runs this entire script on every widget interaction.
# Without caching, the (very large) model and the dataset would be
# reloaded on every button click — so both are cached per server process.
@st.cache_resource(show_spinner="Loading model (this may take a while)...")
def _load_pipeline():
    """Build the text-generation pipeline once and reuse it across reruns."""
    return pipeline("text-generation", model=model_name)


@st.cache_resource(show_spinner="Loading dataset...")
def _load_dataset():
    """Load the Canadian legal dataset's train split once and reuse it."""
    return load_dataset("refugee-law-lab/canadian-legal-data", "default", split="train")


pipe = _load_pipeline()
ds = _load_dataset()

st.title("Canadian Legal Text Generator")
st.write("Enter a prompt related to Canadian legal data and generate text using Llama-3.1.")

st.subheader("Sample Data from Canadian Legal Dataset:")
# Slicing a HF Dataset returns a column-oriented dict of the first five rows.
st.write(ds[:5])

prompt = st.text_area("Enter your prompt:", placeholder="Type something...")

if st.button("Generate Response"):
    # Reject empty or whitespace-only prompts instead of sending them to the model.
    if prompt.strip():
        with st.spinner("Generating response..."):
            # max_new_tokens bounds only the generated continuation;
            # the original max_length=100 counted prompt tokens too, so a
            # long prompt could silently truncate or suppress generation.
            generated_text = pipe(
                prompt,
                max_new_tokens=100,
                do_sample=True,
                temperature=0.7,
            )[0]["generated_text"]
        st.write("**Generated Text:**")
        st.write(generated_text)
    else:
        st.write("Please enter a prompt to generate a response.")
|
|