focusit committed
Commit ea3f118 · 1 Parent(s): 4b3c046

Delete executequery.py

Files changed (1)
  1. executequery.py +0 -99
executequery.py DELETED
@@ -1,99 +0,0 @@
-import torch
-from torch.utils.data import Dataset,DataLoader
-import torch.nn as nn
-import nltk
-from nltk.stem.porter import PorterStemmer
-import json
-import numpy as np
-import random
-
-nltk.download('punkt')
-
-def ExecuteQuery(query):
-
-    class NeuralNet(nn.Module):
-
-        def __init__(self,input_size,hidden_size,num_classes):
-            super(NeuralNet,self).__init__()
-            self.l1 = nn.Linear(input_size,hidden_size)
-            self.l2 = nn.Linear(hidden_size,hidden_size)
-            self.l3 = nn.Linear(hidden_size,num_classes)
-            self.relu = nn.ReLU()
-
-        def forward(self,x):
-            out = self.l1(x)
-            out = self.relu(out)
-            out = self.l2(out)
-            out = self.relu(out)
-            out = self.l3(out)
-            return out
-
-    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
-
-    with open('intents.json', 'r') as json_data:
-        intents = json.load(json_data)
-
-    FILE = "intents.pth"
-    data = torch.load(FILE)
-    # with open('Data/Tasks.pth') as f:
-    #     data = torch.load(f)
-
-
-    input_size = data["input_size"]
-    hidden_size = data["hidden_size"]
-    output_size = data["output_size"]
-    all_words = data["all_words"]
-    tags = data["tags"]
-    model_state = data["model_state"]
-
-    model = NeuralNet(input_size,hidden_size,output_size).to(device)
-    model.load_state_dict(model_state)
-    model.eval()
-
-    Stemmer = PorterStemmer()
-
-    def tokenize(sentence):
-        return nltk.word_tokenize(sentence)
-
-    def stem(word):
-        return Stemmer.stem(word.lower())
-
-    def bag_of_words(tokenized_sentence,words):
-        sentence_word = [stem(word) for word in tokenized_sentence]
-        bag = np.zeros(len(words),dtype=np.float32)
-
-        for idx, w in enumerate(words):
-            if w in sentence_word:
-                bag[idx] = 1
-
-        return bag
-
-    sentence = str(query)
-
-    sentence = tokenize(sentence)
-    X = bag_of_words(sentence,all_words)
-    X = X.reshape(1,X.shape[0])
-    X = torch.from_numpy(X).to(device)
-
-    output = model(X)
-
-    _, predicted = torch.max(output,dim=1)
-
-    tag = tags[predicted.item()]
-
-    probs = torch.softmax(output,dim=1)
-    prob = probs[0][predicted.item()]
-
-    if prob.item() >= 0.96:
-
-        for intent in intents['intents']:
-
-            if tag == intent["tag"]:
-
-                reply = random.choice(intent["responses"])
-
-                return reply, tag, prob.item()
-    if prob.item() <= 0.95:
-        reply = "opencosmo"
-        tag = "opencosmo"
-        return reply, tag, prob.item()
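
For reference, the deleted module exposed a single entry point, ExecuteQuery(query), which tokenizes the query, builds a bag-of-words vector, runs the feed-forward classifier, and returns a (reply, tag, probability) triple. Below is a minimal, illustrative usage sketch (not part of the repository); it assumes executequery.py still exists alongside intents.json and intents.pth, the paths hard-coded in the deleted code.

    # Illustrative sketch only; assumes executequery.py, intents.json and intents.pth
    # (the files referenced by the deleted code) are present in the working directory.
    from executequery import ExecuteQuery

    if __name__ == "__main__":
        for query in ("hello", "what can you do"):
            result = ExecuteQuery(query)
            # The deleted function returns (reply, tag, probability) when it answers,
            # but returns None when the predicted probability falls strictly between
            # 0.95 and 0.96, or when the predicted tag matches no intent in intents.json.
            if result is not None:
                reply, tag, prob = result
                print(f"{query!r} -> tag={tag}, prob={prob:.3f}, reply={reply!r}")
            else:
                print(f"{query!r} -> no confident intent match")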