from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

# Load the model and tokenizer from Hugging Face (this repo is gated, so you
# may need to accept its license on the Hub and run `huggingface-cli login`)
model_name = "mistralai/Mistral-7B-Instruct-v0.3"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
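# Note: loading the 7B model in the default full precision needs roughly
# 28 GB of memory. On a machine with a suitable GPU you could instead load
# it in half precision and let the library place the weights automatically
# (this variant assumes the `accelerate` package is installed and that
# `torch` is imported):
#
#     model = AutoModelForCausalLM.from_pretrained(
#         model_name, torch_dtype=torch.float16, device_map="auto"
#     )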

# Initialize the text-generation pipeline (the old "conversational" pipeline
# was removed from transformers; chat models now use "text-generation")
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
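
# Optional sanity check (illustrative; the reply text will vary): calling
# the pipeline with a list of chat messages returns the conversation with
# the model's reply appended as the final message.
#
#     out = pipe([{"role": "user", "content": "What is a diode?"}],
#                max_new_tokens=64)
#     print(out[0]["generated_text"][-1]["content"])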

# Function to handle user input and generate responses
def electronics_chatbot():
    print("Welcome to the Electronics Components Chatbot! Type 'exit' to quit.")
    print("Ask me about any electronics component, such as 'What is a resistor?' or 'How does a capacitor work in a circuit?'")

    # Conversation history as a list of chat messages; the full history is
    # passed to the pipeline on every turn so the model keeps context
    messages = []

    while True:
        user_input = input("You: ")

        if user_input.lower() == 'exit':
            print("Goodbye!")
            break

        # Add the user's turn to the running message history so the model
        # sees earlier questions and answers as context
        messages.append({"role": "user", "content": user_input})
        
        # Generate the response; the pipeline applies the model's chat
        # template to the full message history automatically
        response = pipe(messages, max_new_tokens=256)

        # The pipeline returns the conversation with the assistant's reply
        # appended as the last message
        reply = response[0]["generated_text"][-1]["content"]
        print(f"Bot: {reply}\n")

        # Keep the reply in the history so the next turn has full context
        messages.append({"role": "assistant", "content": reply})

# Start the chatbot
if __name__ == "__main__":
    electronics_chatbot()