Manoj Kumar committed on
Commit
f1b2798
·
1 Parent(s): 83ce0d2

updated question structure

Browse files
Files changed (2) hide show
  1. README.md +1 -1
  2. database.py +67 -0
README.md CHANGED
@@ -5,7 +5,7 @@ colorFrom: red
5
  colorTo: red
6
  sdk: gradio
7
  sdk_version: 5.11.0
8
- app_file: app.py
9
  pinned: false
10
  python: 3.9
11
  ---
 
5
  colorTo: red
6
  sdk: gradio
7
  sdk_version: 5.11.0
8
+ app_file: database.py
9
  pinned: false
10
  python: 3.9
11
  ---
database.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load model and tokenizer
# NOTE(review): this downloads/loads a 2.7B-parameter causal LM eagerly at
# import time — slow and memory-heavy; confirm this is intended to run on
# module import rather than lazily.
model_name = "EleutherAI/gpt-neo-2.7B" # Replace with a suitable model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
7
+
8
# Example schema
# Maps table name -> {"columns": [...], "relations": "<fk> -> <table>.<col>" or None}.
# Consumed by generate_context() to build the prompt shown to the model.
schema = {
    "products": {
        "columns": ["product_id", "name", "price", "category_id"],
        "relations": "category_id -> categories.id",
    },
    "categories": {
        "columns": ["id", "category_name"],
        "relations": None,  # no foreign keys on this table
    },
    "orders": {
        "columns": ["order_id", "customer_name", "product_id", "order_date"],
        "relations": "product_id -> products.product_id",
    },
}
23
+
24
def generate_context(schema):
    """
    Build the prompt-context string for the given *schema*.

    Emits a "### Database Schema ###" section listing each table's columns
    (plus its relations line, when one is present), followed by a fixed
    "### Instructions ###" block telling the model how to answer.
    """
    parts = ["### Database Schema ###\n\n"]
    for table_name, info in schema.items():
        parts.append(f"Table: {table_name}\nColumns: {', '.join(info['columns'])}\n")
        relations = info.get("relations")
        if relations:
            parts.append(f"Relations: {relations}\n")
        parts.append("\n")

    parts.append("### Instructions ###\n")
    parts.append(
        "Generate SQL queries based on the user's question. "
        "Understand the schema to identify relevant tables and relationships. "
        "If the question involves multiple tables, use appropriate joins.\n"
    )
    # Single join instead of repeated string concatenation.
    return "".join(parts)
42
+
43
# Generate dynamic context
# Module-level prompt prefix reused by every answer_question() call.
context = generate_context(schema)
45
+
46
def answer_question(context, question):
    """
    Generate an SQL query or database-related response using the model.

    Parameters
    ----------
    context : str
        Schema/instruction prompt prefix (see generate_context()).
    question : str
        The user's natural-language question.

    Returns
    -------
    str
        The decoded model output. Note the whole sequence is decoded, so
        the returned text includes the prompt itself.
    """
    prompt = f"{context}\n\nUser Question: {question}\nSQL Query or Answer:"
    inputs = tokenizer(prompt, return_tensors="pt", truncation=True)
    # BUG FIX: the original passed max_length=256, which caps prompt AND
    # generated tokens combined — the schema+instruction prompt alone can
    # exceed 256 tokens, leaving no budget for the answer. max_new_tokens
    # budgets only the generated continuation. Also pass attention_mask
    # explicitly rather than letting generate() infer it.
    outputs = model.generate(
        inputs.input_ids,
        attention_mask=inputs.attention_mask,
        max_new_tokens=256,
        num_beams=5,
        early_stopping=True,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
54
+
55
# Interactive loop
# Announce readiness, then run the assistant over a canned list of
# example questions, printing each question and the model's response.
print("Database Assistant is ready. Ask your questions!")

# Example interactive questions
questions = [
    "describe the product table for me, what kind of data it is storing and all"
]

for user_question in questions:
    print(f"Question: {user_question}")
    print("\nGenerated Response:\n", answer_question(context, user_question), "\n")