anshupatel4298 committed on
Commit
045525d
1 Parent(s): 731ae2f

Update app.py

Files changed (1)
  1. app.py +88 -57
app.py CHANGED
@@ -1,70 +1,101 @@
- import streamlit as st
  from transformers import BertTokenizer, TFBertForSequenceClassification
- import tensorflow as tf
- import numpy as np
- import requests
- import os

- # Ensure PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION is set to "python"
- os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python"

- # Paths to your models hosted on Hugging Face
- basic_model_url = "https://huggingface.co/anshupatel4298/bert-chatbot-model/resolve/main/basic_chatbot_model.h5"
- local_model_path = "basic_chatbot_model.h5"
- bert_model_name = "anshupatel4298/bert-chatbot-model"

- # Define the model architecture that matches the original one
- def create_model():
-     model = tf.keras.Sequential([
-         tf.keras.layers.InputLayer(input_shape=(1825,)),  # Adjust input shape to match the original model
-         tf.keras.layers.Dense(128, activation='relu'),
-         tf.keras.layers.Dropout(0.5),
-         tf.keras.layers.Dense(64, activation='relu'),
-         tf.keras.layers.Dropout(0.5),
-         tf.keras.layers.Dense(40, activation='softmax')  # Adjust the number of classes to match the original model
-     ])
-     return model

- # Create the model
- basic_model = create_model()

- # Download the Basic Model weights from the URL if not already downloaded
- if not os.path.exists(local_model_path):
-     response = requests.get(basic_model_url)
-     with open(local_model_path, 'wb') as f:
-         f.write(response.content)

- # Load the weights into the model
- basic_model.load_weights(local_model_path)

- # Load BERT Model and Tokenizer
- bert_model = TFBertForSequenceClassification.from_pretrained(bert_model_name)
- bert_tokenizer = BertTokenizer.from_pretrained(bert_model_name)

- # Set your MAX_SEQUENCE_LENGTH here
- MAX_SEQUENCE_LENGTH = 100

- # Streamlit UI
- st.sidebar.title("Select Model")
- model_choice = st.sidebar.selectbox("Choose a model:", ["Basic Model", "BERT Model"])

- st.title("Chatbot Interface")

- user_input = st.text_input("You:")
- if st.button("Send"):
-     if user_input:
-         if model_choice == "Basic Model":
-             # Preprocess input for basic model
-             tokenizer = tf.keras.preprocessing.text.Tokenizer(num_words=1825)
-             tokenizer.fit_on_texts([user_input])
-             tokenized_input = tokenizer.texts_to_sequences([user_input])
-             input_data = tf.keras.preprocessing.sequence.pad_sequences(tokenized_input, maxlen=1825)  # Ensure padding to match the input shape
-             prediction = basic_model.predict(input_data)
-             response = np.argmax(prediction, axis=-1)[0]
          else:
-             # Preprocess input for BERT model
-             inputs = bert_tokenizer(user_input, return_tensors="tf", max_length=MAX_SEQUENCE_LENGTH, truncation=True, padding="max_length")
-             outputs = bert_model(**inputs)
-             response = tf.argmax(outputs.logits, axis=-1).numpy()[0]

-         st.write(f"Bot: {response}")
 
 
+ import json
+ import re
+ import nltk
+ import numpy as np  # was missing; predict_class below calls np.argmax
+ from flask import Flask, render_template, request
  from transformers import BertTokenizer, TFBertForSequenceClassification
+ import logging
+ import random
+ import time

+ nltk.download('punkt')
+ nltk.download('wordnet')

+ app = Flask(__name__)

+ # Set up logging
+ logging.basicConfig(level=logging.DEBUG)

+ # Load the BERT tokenizer and model
+ model_name = "bert-base-uncased"
+ tokenizer = BertTokenizer.from_pretrained(model_name)
+ bert_model = TFBertForSequenceClassification.from_pretrained(model_name)

+ # Function to preprocess input text
+ def preprocess_input(text):
+     inputs = tokenizer(text, return_tensors='tf', max_length=512, truncation=True, padding='max_length')
+     return inputs
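+ # (512 is the longest sequence BERT accepts; padding='max_length' pads every
+ # input out to that full length.)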
 
+ # Function to predict the class using BERT model
+ def predict_class(sentence, model):
+     inputs = preprocess_input(sentence)
+     outputs = model(inputs)
+     logits = outputs.logits
+     predicted_class = np.argmax(logits, axis=-1)[0]  # The predicted class (index)
+
+     logging.debug(f"Logits: {logits}")
+     logging.debug(f"Predicted class: {predicted_class}")
+
+     return predicted_class
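+ # NOTE: predict_class is defined but never called by chatbot_response below,
+ # which matches patterns from intents.json instead; using it for replies would
+ # also need a mapping from the returned class index back to an intent tag.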

+ # Function to normalize text
+ def normalize_text(text):
+     text = text.lower()
+     text = re.sub(r'\s+', ' ', text)  # Replace multiple spaces with single space
+     text = re.sub(r'[^\w\s]', '', text)  # Remove punctuation
+     return text

+ # Function to find the intent based on user message
+ def find_intent(user_message, intents_json):
+     normalized_message = normalize_text(user_message)
+
+     for intent in intents_json["intents"]:
+         for pattern in intent["patterns"]:
+             normalized_pattern = normalize_text(pattern)
+             if normalized_pattern in normalized_message:
+                 return intent
+     return None
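+ # Example: the pattern "good morning" and the message "Good   Morning!!" both
+ # normalize to "good morning", so the substring check above matches them.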

+ # Function to get the response based on the found intent
+ def getResponse(user_message, intents_json):
+     intent = find_intent(user_message, intents_json)
+
+     if intent:
+         responses = intent.get("responses", [])
+         if responses:
+             result = random.choice(responses)
+             logging.debug(f"Response chosen: {result}")  # Log the chosen response
+             return result
          else:
+             logging.debug("No responses found for intent.")
+             return "I'm not sure what to say about that!"
+
+     return "Sorry, I didn't understand that."

+ # Route for the home page
+ @app.route("/")
+ def index():
+     return render_template('chat.html')

+ # Route to handle the chat messages
+ @app.route("/get", methods=["GET", "POST"])
+ def chat():
+     msg = request.form["msg"]
+     response = chatbot_response(msg)
+     return response
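+ # Note: request.form only carries form-encoded body data, so despite the GET in
+ # methods, a bare GET without a form body will fail with a 400 on the "msg"
+ # lookup; the route is effectively POST-only as written.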

+ # Function to generate the chatbot response
+ def chatbot_response(user_message):
+     # Simulate model processing time
+     time.sleep(random.uniform(0.5, 1.5))  # Simulate delay
+
+     # Use the "model" to get a response from the intents
+     # (json.load inside a with block, so the file handle is closed promptly)
+     with open('intents.json') as data_file:
+         intents = json.load(data_file)
+
+     # Use pattern matching to get response from intents
+     res = getResponse(user_message, intents)
+     logging.debug(f"Final chatbot response: {res}")
+     return res

+ if __name__ == "__main__":
+     app.run(debug=True)
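
The new version reads its replies from an intents.json file that is not part of this commit. As a rough sketch of the shape find_intent and getResponse assume (a top-level "intents" list whose entries carry "patterns" and "responses"), the file could be generated as below; the "greeting" tag and the strings are illustrative, not taken from the repository:

import json

# Illustrative only; the real intents.json is not included in this commit.
sample = {
    "intents": [
        {
            "tag": "greeting",  # hypothetical tag; the lookup only reads "patterns" and "responses"
            "patterns": ["hi", "hello", "good morning"],
            "responses": ["Hello!", "Hi there, how can I help?"],
        }
    ]
}

with open("intents.json", "w") as f:
    json.dump(sample, f, indent=2)

With that file in place and the app running (python app.py), the /get route can be smoke-tested with a form-encoded POST, for example requests.post("http://127.0.0.1:5000/get", data={"msg": "hello"}).text, which should return one of the greeting responses above.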