import torch
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer from Hugging Face
model_name = "mistralai/Mistral-7B-Instruct-v0.3"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # half precision so the 7B weights fit in GPU memory
    device_map="auto",          # requires the accelerate package
)

# Initialize the text-generation pipeline; the "conversational" task has been
# deprecated and removed from transformers, and "text-generation" accepts
# chat-style message lists directly
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)

# Function to handle user input and generate responses
def electronics_chatbot():
    print("Welcome to the Electronics Components Chatbot! Type 'exit' to quit.")
    print("Ask me about any electronics component, such as 'What is a resistor?' or 'How does a capacitor work in a circuit?'")

    # Conversation history gives the model context across turns
    conversation_history = []

    while True:
        user_input = input("You: ")
        if user_input.lower() == 'exit':
            print("Goodbye!")
            break

        # Add the user's turn to the conversation history
        conversation_history.append({"role": "user", "content": user_input})

        # Generate a response; passing the full history preserves context
        response = pipe(conversation_history, max_new_tokens=256)

        # The pipeline returns the whole chat; the last message is the new reply
        bot_reply = response[0]['generated_text'][-1]['content']

        # Display the response
        print(f"Bot: {bot_reply}\n")

        # Append the bot's reply so later turns stay in context
        conversation_history.append({"role": "assistant", "content": bot_reply})

# Start the chatbot
electronics_chatbot()
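
Because the script sends the entire conversation_history to the model on every turn, a long chat can eventually exceed the model's context window. Below is a minimal sketch of one way to bound the prompt size; trim_history and max_turns are hypothetical names chosen for illustration, not part of the original script.

# Keep only the most recent turns so the prompt stays within the context window.
# max_turns is an arbitrary illustrative cutoff, not a value from the original code.
def trim_history(history, max_turns=10):
    # Each turn contributes two messages: the user's and the assistant's
    return history[-(max_turns * 2):]

# Example usage inside the loop, before calling the pipeline:
#     conversation_history = trim_history(conversation_history)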