smartinez1 committed on
Commit 9fa8017 · verified · 1 Parent(s): fdac04c

Update README.md

Files changed (1)
  1. README.md +52 -1
README.md CHANGED
@@ -46,7 +46,58 @@ tags:
 
  <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
 
- [More Information Needed]
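+ The adapter can be loaded on top of the base `meta-llama/Llama-3.1-8B` checkpoint with `peft` and queried through a `transformers` text-generation pipeline:
+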
+ ```python
+ from huggingface_hub import login
+ import torch
+ from peft import PeftModel, PeftConfig
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+
+ # login()  # authenticate first if the gated meta-llama checkpoint requires it
+
+ # Load the adapter config and the base model, then apply the PEFT adapter on top
+ config = PeftConfig.from_pretrained("smartinez1/Llama-3.1-8B-FINLLM")
+ base_model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.1-8B")
+ model = PeftModel.from_pretrained(base_model, "smartinez1/Llama-3.1-8B-FINLLM")
+
+ # Load the tokenizer associated with the base model
+ tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.1-8B")
+
+ # Set up the text generation pipeline with the PEFT model, specifying the device
+ device = 0 if torch.cuda.is_available() else -1
+ generator = pipeline("text-generation", model=model, tokenizer=tokenizer, device=device)
+
+ # List of user inputs
+ user_inputs = [
+     "Provide a link for Credit Card Accountability Responsibility and Disclosure Act law.",
+     "Define the following term: National Automated Clearing House Association.",
+     "Expand the following acronym into its full form: CIA.",
+ ]
+
+ # Define the prompt template
+ prompt_template = """Below is an instruction that describes a task. Write a response that appropriately completes the request.
+
+ ### Instruction:
+ {0}
+
+ ### Answer:
+ {1}
+ """
+
+ # Loop over each user input and generate a response
+ for user_input in user_inputs:
+     # Format the user input into the prompt
+     prompt = prompt_template.format(user_input, "")
+
+     # Generate a response from the model
+     response = generator(prompt, max_length=200, num_return_sequences=1, do_sample=True)
+
+     # Extract and clean up the model's response
+     response_str = response[0]["generated_text"].split("### Answer:")[1].strip()
+     cut_ind = response_str.find("#")  # Drop any extra sections after the answer
+     response_str = response_str[:cut_ind].strip() if cut_ind != -1 else response_str
+
+     # Display the model's response
+     print(f"User: {user_input}")
+     print(f"AI: {response_str}")
+     print("-" * 50)  # Separator for clarity
+ ```
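+
+ If GPU memory is tight, the base model can instead be loaded in 4-bit before the adapter is applied. The snippet below is a minimal sketch of that variant using `BitsAndBytesConfig` from `transformers`; the NF4 settings are illustrative assumptions, not a configuration validated for this adapter:
+
+ ```python
+ import torch
+ from transformers import AutoModelForCausalLM, BitsAndBytesConfig
+
+ # Assumed 4-bit NF4 quantization settings; adjust to your hardware
+ bnb_config = BitsAndBytesConfig(
+     load_in_4bit=True,
+     bnb_4bit_quant_type="nf4",
+     bnb_4bit_compute_dtype=torch.bfloat16,
+ )
+ base_model = AutoModelForCausalLM.from_pretrained(
+     "meta-llama/Llama-3.1-8B",
+     quantization_config=bnb_config,
+     device_map="auto",  # requires the accelerate package
+ )
+ ```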
 
  ### Downstream Use [optional]