Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -27,27 +27,23 @@ I asked Generative AI Models about their context window. Their response was intr
|
|
27 |
The context window for a large language model (LLM) like OpenAI’s GPT refers to the maximum amount of text the model can consider at any one time when generating a response. This includes both the prompt provided by the user and the model’s generated text.
|
28 |
In practical terms, the context window limits how much previous dialogue the model can “remember” during an interaction. If the interaction exceeds the context window, the model loses access to the earliest parts of the conversation. This limitation can impact the model’s consistency in long conversations or complex tasks.
|
29 |
"""
|
30 |
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
print(list(json_object.keys())[-1])
|
48 |
-
else:
|
49 |
-
cnt+=1
|
50 |
-
|
51 |
|
52 |
def get_nouns(text=text,steps=1):
|
53 |
control_len=control_json['leng']-steps
|
@@ -57,37 +53,25 @@ def get_nouns(text=text,steps=1):
|
|
57 |
val_len=len(control_val)
|
58 |
print(control_char)
|
59 |
print(control_val)
|
|
|
60 |
json_object={}
|
61 |
-
sen_list=[]
|
62 |
noun_list={}
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
sen_list.append(str(sentence))
|
68 |
-
|
69 |
-
|
70 |
-
noun_box=[]
|
71 |
-
for ea in blob.parse().split(" "):
|
72 |
-
#print(ea)
|
73 |
-
n=ea.split("/")
|
74 |
-
if n[1] == "NN":
|
75 |
-
noun_box.append(n[0])
|
76 |
|
77 |
-
|
78 |
-
print(sen_list)
|
79 |
key_cnt=len(sen_list)
|
80 |
-
noun_cnt=len(noun_box)
|
81 |
print(key_cnt)
|
82 |
-
|
|
|
83 |
|
84 |
big_cnt=0
|
85 |
cnt=0
|
86 |
go=True
|
87 |
-
a="Z"
|
88 |
|
89 |
n_cnt=0
|
90 |
-
nx=
|
91 |
while True:
|
92 |
if nx > 1:
|
93 |
n_cnt+=1
|
@@ -99,23 +83,7 @@ def get_nouns(text=text,steps=1):
|
|
99 |
print("#######")
|
100 |
steps=n_cnt
|
101 |
break
|
102 |
-
|
103 |
-
step_allot=char_len**steps
|
104 |
-
print(step_allot)
|
105 |
-
div_raw=(step_allot/noun_cnt)
|
106 |
-
print(div_raw)
|
107 |
-
div_steps=int(step_allot/noun_cnt)
|
108 |
-
print(div_steps)
|
109 |
-
div_remain=div_raw-div_steps
|
110 |
-
print(div_remain)
|
111 |
-
steps_mult=div_remain*char_len
|
112 |
-
#steps_mult=div_remain*char_len
|
113 |
-
print(steps_mult)
|
114 |
-
print(math.ceil(steps_mult))
|
115 |
-
step_list=[]
|
116 |
|
117 |
-
step_control=""
|
118 |
-
step_cont_box=[]
|
119 |
for ii in range(steps):
|
120 |
print(ii)
|
121 |
step_cont_box.append(0)
|
@@ -123,11 +91,8 @@ def get_nouns(text=text,steps=1):
|
|
123 |
mod=0
|
124 |
pos=len(step_cont_box)-1
|
125 |
|
126 |
-
if noun_cnt > step_allot:
|
127 |
-
go=False
|
128 |
-
print("Increase Steps")
|
129 |
if go:
|
130 |
-
for i, ea in enumerate(
|
131 |
if go:
|
132 |
if cnt > char_len-1:
|
133 |
#print(step_cont_box)
|
@@ -145,20 +110,21 @@ def get_nouns(text=text,steps=1):
|
|
145 |
cnt+=1
|
146 |
print(step_cont_box)
|
147 |
out_js=""
|
148 |
-
for
|
149 |
print(j)
|
150 |
out_js = out_js+control_char[j]
|
151 |
-
|
152 |
-
|
|
|
153 |
|
154 |
big_cnt+=1
|
155 |
-
if big_cnt==
|
156 |
print("DONE")
|
157 |
go=False
|
158 |
|
159 |
|
160 |
|
161 |
-
return
|
162 |
|
163 |
|
164 |
|
|
|
27 |
The context window for a large language model (LLM) like OpenAI’s GPT refers to the maximum amount of text the model can consider at any one time when generating a response. This includes both the prompt provided by the user and the model’s generated text.
|
28 |
In practical terms, the context window limits how much previous dialogue the model can “remember” during an interaction. If the interaction exceeds the context window, the model loses access to the earliest parts of the conversation. This limitation can impact the model’s consistency in long conversations or complex tasks.
|
29 |
"""
|
30 |
+
def get_sen_list(text):
    """Split *text* into sentences and return them as a list of plain strings.

    Relies on TextBlob's sentence segmentation; each ``Sentence`` object is
    converted back to ``str`` so callers get ordinary strings.
    """
    blob = TextBlob(text)
    return [str(sentence) for sentence in blob.sentences]
|
36 |
+
|
37 |
+
def proc_sen(sen_list, cnt):
    """Build a JSON-style record for sentence number *cnt* of *sen_list*.

    Parameters
    ----------
    sen_list : list[str]
        Sentences, e.g. as produced by ``get_sen_list``.
    cnt : int
        Index of the sentence to process.

    Returns
    -------
    dict
        ``{'sentence': ..., 'noun_phrase': ..., 'nouns': [...]}`` where
        ``nouns`` holds the tokens TextBlob's parser tagged as ``NN``.
    """
    blob_n = TextBlob(sen_list[cnt])
    noun_p = blob_n.noun_phrases
    noun_box1 = []
    for ea in blob_n.parse().split(" "):
        n = ea.split("/")
        # Guard: a token with no "/" separator would make n[1] raise IndexError.
        if len(n) > 1 and n[1] == "NN":
            noun_box1.append(n[0])
    # BUG FIX: the original built the dict from the undefined name `noun_box`;
    # the nouns collected above live in `noun_box1`.
    json_object = {
        'sentence': sen_list[cnt],
        'noun_phrase': noun_p,
        'nouns': noun_box1,
    }
    return json_object
|
|
|
|
|
|
|
|
|
47 |
|
48 |
def get_nouns(text=text,steps=1):
|
49 |
control_len=control_json['leng']-steps
|
|
|
53 |
val_len=len(control_val)
|
54 |
print(control_char)
|
55 |
print(control_val)
|
56 |
+
|
57 |
json_object={}
|
|
|
58 |
noun_list={}
|
59 |
+
step_list=[]
|
60 |
+
step_cont_box=[]
|
61 |
+
|
62 |
+
sen_list=get_sen_list(text)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
63 |
|
|
|
|
|
64 |
key_cnt=len(sen_list)
|
|
|
65 |
print(key_cnt)
|
66 |
+
#noun_cnt=len(noun_box)
|
67 |
+
#print(noun_cnt)
|
68 |
|
69 |
big_cnt=0
|
70 |
cnt=0
|
71 |
go=True
|
|
|
72 |
|
73 |
n_cnt=0
|
74 |
+
nx=key_cnt
|
75 |
while True:
|
76 |
if nx > 1:
|
77 |
n_cnt+=1
|
|
|
83 |
print("#######")
|
84 |
steps=n_cnt
|
85 |
break
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
86 |
|
|
|
|
|
87 |
for ii in range(steps):
|
88 |
print(ii)
|
89 |
step_cont_box.append(0)
|
|
|
91 |
mod=0
|
92 |
pos=len(step_cont_box)-1
|
93 |
|
|
|
|
|
|
|
94 |
if go:
|
95 |
+
for i, ea in enumerate(sen_list):
|
96 |
if go:
|
97 |
if cnt > char_len-1:
|
98 |
#print(step_cont_box)
|
|
|
110 |
cnt+=1
|
111 |
print(step_cont_box)
|
112 |
out_js=""
|
113 |
+
for iii,j in enumerate(step_cont_box):
|
114 |
print(j)
|
115 |
out_js = out_js+control_char[j]
|
116 |
+
sen_obj=proc_sen(sen_list,i)
|
117 |
+
#json_out[out_js]={'nouns':ea}
|
118 |
+
json_out[out_js]=sen_obj
|
119 |
|
120 |
big_cnt+=1
|
121 |
+
if big_cnt==key_cnt:
|
122 |
print("DONE")
|
123 |
go=False
|
124 |
|
125 |
|
126 |
|
127 |
+
return json_out,noun_list
|
128 |
|
129 |
|
130 |
|