kwabs22 committed on
Commit c6418f1 · 1 Parent(s): cce05a1

Timeline Brainstorm

Files changed (2)
  1. app.py +50 -121
  2. relatively_constant_variables.py +113 -0
app.py CHANGED
@@ -80,119 +80,10 @@ from relatively_constant_variables import *
 
 
 #---------
-#----------
-
-# # Initialize GPU tensor
-# zero = torch.Tensor([0]).cuda()
-# print(zero.device) # This will print 'cpu' outside the @spaces.GPU decorated function
-
-# # Load the embedding model
-# embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
-
-# # Load the Qwen model and tokenizer
-# llmguide_model = AutoModelForCausalLM.from_pretrained(
-#     "Qwen/Qwen2-0.5B-Instruct",
-#     torch_dtype="auto",
-#     device_map="auto"
-# )
-# llmguide_tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2-0.5B-Instruct")
-
-# # Sample knowledge base (replace with your own data)
-# knowledge_base = [
-#     "The capital of France is Paris.",
-#     "Python is a popular programming language.",
-#     "Machine learning is a subset of artificial intelligence.",
-#     "The Earth orbits around the Sun.",
-#     "orbits are a group of fans of a music group"
-# ]
-
-# # Create embeddings for the knowledge base
-# knowledge_base_embeddings = embedding_model.encode(knowledge_base)
-
-# def retrieve(query, k=2):
-#     query_embedding = embedding_model.encode([query])
-#     similarities = torch.nn.functional.cosine_similarity(torch.tensor(query_embedding), torch.tensor(knowledge_base_embeddings))
-#     top_k_indices = similarities.argsort(descending=True)[:k]
-#     return [knowledge_base[i] for i in top_k_indices]
-
-# def get_resource_usage():
-#     ram_usage = psutil.virtual_memory().percent
-#     gpu_memory_allocated = torch.cuda.memory_allocated() / (1024 ** 3) # Convert to GB
-#     gpu_memory_reserved = torch.cuda.memory_reserved() / (1024 ** 3) # Convert to GB
-#     return f"RAM Usage: {ram_usage:.2f}%, GPU Memory Allocated: {gpu_memory_allocated:.2f}GB, GPU Memory Reserved: {gpu_memory_reserved:.2f}GB"
-
-# @spaces.GPU
-# def llmguide_generate_response(prompt, stream=False):
-#     print(zero.device) # This will print 'cuda:0' inside the @spaces.GPU decorated function
-
-#     messages = [
-#         {"role": "system", "content": "You are a helpful assistant."},
-#         {"role": "user", "content": prompt}
-#     ]
-#     text = llmguide_tokenizer.apply_chat_template(
-#         messages,
-#         tokenize=False,
-#         add_generation_prompt=True
-#     )
-#     model_inputs = llmguide_tokenizer([text], return_tensors="pt").to(zero.device)
-
-#     start_time = time.time()
-#     total_tokens = 0
-
-#     if stream:
-#         streamer = TextIteratorStreamer(llmguide_tokenizer, skip_special_tokens=True)
-#         generation_kwargs = dict(
-#             model_inputs,
-#             streamer=streamer,
-#             max_new_tokens=512,
-#             temperature=0.7,
-#         )
-#         thread = Thread(target=llmguide_model.generate, kwargs=generation_kwargs)
-#         thread.start()
-
-#         generated_text = ""
-#         for new_text in streamer:
-#             generated_text += new_text
-#             total_tokens += 1
-#             current_time = time.time()
-#             tokens_per_second = total_tokens / (current_time - start_time)
-#             yield generated_text, f"{tokens_per_second:.2f}", ""
-
-#         resource_usage = get_resource_usage()
-#         yield generated_text, f"{tokens_per_second:.2f}", resource_usage
-#     else:
-#         generated_ids = llmguide_model.generate(
-#             model_inputs.input_ids,
-#             max_new_tokens=512
-#         )
-#         generated_ids = [
-#             output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
-#         ]
-#         response = llmguide_tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
-#         total_tokens = len(generated_ids[0])
-#         end_time = time.time()
-#         tokens_per_second = total_tokens / (end_time - start_time)
-#         resource_usage = get_resource_usage()
-#         yield response, f"{tokens_per_second:.2f}", resource_usage
-
-# def rag(query, stream=False):
-#     retrieved_docs = retrieve(query)
-#     context = " ".join(retrieved_docs)
-#     prompt = f"Context: {context}\nQuestion: {query}\nAnswer:"
-
-#     generator = llmguide_generate_response(prompt, stream)
-
-#     if stream:
-#         def stream_output():
-#             for generated_text, tokens_per_second, ram_usage in generator:
-#                 yield generated_text, tokens_per_second, ram_usage
-#         return stream_output()
-#     else:
-#         # For non-streaming, we just need to get the final output
-#         for generated_text, tokens_per_second, ram_usage in generator:
-#             pass # This will iterate to the last yield
-#         return generated_text, tokens_per_second, ram_usage
+# # Initialize the zero tensor on CUDA
+zero = torch.Tensor([0]).cuda()
+print(zero.device) # This will print 'cpu' outside the @spaces.GPU decorated function
 
 
 # Load the embedding model
 embedding_model = SentenceTransformer('all-MiniLM-L6-v2')
@@ -222,6 +113,8 @@ def get_ram_usage():
 
 @spaces.GPU
 def llmguide_generate_response(prompt, doc_ids=None, stream=False):
+    print(zero.device) # This will print 'cuda:0' inside the @spaces.GPU decorated function
+
     messages = [
         {"role": "system", "content": "You are a helpful assistant."},
         {"role": "user", "content": prompt}
@@ -295,9 +188,6 @@ def process_query(query, use_rag, stream=False):
             pass # This will iterate to the last yield
         return generated_text, tokens_per_second, ram_usage, doc_references
 
-
-#--------------------------------------------------------------------------------------------------------------------------------
-
 #importing FAQAllprompts from relatively_constant_variables
 
 #--------------------------------------------------------------------------------------------------------------------------------
@@ -809,21 +699,47 @@ def refresh_file_explorer():
 
 #-----------------------------------------------------------------------------------------------------------------------------------
 
-def timeline_get_random_suggestions(num_lists, items_per_list):
+# def timeline_get_random_suggestions(num_lists, items_per_list):
+#     """
+#     Generate random suggestions from a specified number of lists.
+
+#     :param num_lists: Number of lists to consider
+#     :param items_per_list: Number of items to select from each list
+#     :return: A list of randomly selected suggestions
+#     """
+#     selected_lists = random.sample(all_idea_lists, min(num_lists, len(all_idea_lists)))
+#     suggestions = []
+
+#     for lst in selected_lists:
+#         suggestions.extend(random.sample(lst, min(items_per_list, len(lst))))
+
+#     return suggestions
+
+def timeline_get_random_suggestions(num_lists, items_per_list, include_existing_games, include_multiplayer):
     """
     Generate random suggestions from a specified number of lists.
 
     :param num_lists: Number of lists to consider
    :param items_per_list: Number of items to select from each list
+    :param include_existing_games: Whether to include existing game inspiration lists
+    :param include_multiplayer: Whether to include multiplayer features list
+    :return: A tuple containing the list of randomly selected suggestions and the names of selected lists
-    :return: A list of randomly selected suggestions
     """
-    selected_lists = random.sample(all_idea_lists, min(num_lists, len(all_idea_lists)))
+    available_lists = all_idea_lists.copy()
+    if not include_existing_games:
+        available_lists = [lst for lst in available_lists if lst not in existing_game_inspirations]
+    if not include_multiplayer:
+        available_lists = [lst for lst in available_lists if lst != multiplayer_features]
+
+    selected_lists = random.sample(available_lists, min(num_lists, len(available_lists)))
     suggestions = []
+    selected_list_names = []
 
    for lst in selected_lists:
        suggestions.extend(random.sample(lst, min(items_per_list, len(lst))))
+        selected_list_names.append(list_names[all_idea_lists.index(lst)])
 
-    return suggestions
+    return suggestions, selected_list_names
 
 #-----------------------------------------------------------------------------------------------------------------------------------
@@ -911,15 +827,28 @@ with gr.Blocks() as demo:
     gr.HTML(f"placeholder for current empty JSON config shape")
     gr.HTML("Structural indicators of quality of config")
     with gr.Tab("Random Suggestions"):
+        # timeline_num_lists_slider = gr.Slider(minimum=1, maximum=len(all_idea_lists), step=1, label="Number of Lists to Consider", value=3)
+        # timeline_items_per_list_slider = gr.Slider(minimum=1, maximum=10, step=1, label="Items per List", value=3)
+        # timeline_generate_button = gr.Button("Generate Random Suggestions")
+        # timeline_output_text = gr.Textbox(label="Random Suggestions", lines=10)
+
+        # timeline_generate_button.click(
+        #     timeline_get_random_suggestions,
+        #     inputs=[timeline_num_lists_slider, timeline_items_per_list_slider],
+        #     outputs=[timeline_output_text]
+        # )
        timeline_num_lists_slider = gr.Slider(minimum=1, maximum=len(all_idea_lists), step=1, label="Number of Lists to Consider", value=3)
        timeline_items_per_list_slider = gr.Slider(minimum=1, maximum=10, step=1, label="Items per List", value=3)
+        timeline_include_existing_games = gr.Checkbox(label="Include Existing Game Inspirations", value=True)
+        timeline_include_multiplayer = gr.Checkbox(label="Include Multiplayer Features", value=True)
        timeline_generate_button = gr.Button("Generate Random Suggestions")
        timeline_output_text = gr.Textbox(label="Random Suggestions", lines=10)
+        timeline_selected_lists_text = gr.Textbox(label="Selected Lists", lines=2)
 
        timeline_generate_button.click(
            timeline_get_random_suggestions,
-            inputs=[timeline_num_lists_slider, timeline_items_per_list_slider],
-            outputs=[timeline_output_text]
+            inputs=[timeline_num_lists_slider, timeline_items_per_list_slider, timeline_include_existing_games, timeline_include_multiplayer],
+            outputs=[timeline_output_text, timeline_selected_lists_text]
        )
     with gr.Tab("Config Specific"):
        gr.HTML("Timeline for making Timelines?")
relatively_constant_variables.py CHANGED
@@ -870,6 +870,119 @@ TestmedialoadinHTML = """
 
 #-------------------------#-------------------------#-------------------------#-------------------------
 
+timeline_components = [
+    "Story progression tracker",
+    "Inventory management system",
+    "Character stat system",
+    "Quest log",
+    "Achievement system",
+    "NPC relationship tracker",
+    "Time and weather simulation",
+    "Economy and trading system",
+    "Crafting system",
+    "Skill tree",
+    "Dialogue manager",
+    "Combat system",
+    "Save/Load system",
+    "Random event generator",
+    "Faction reputation system"
+]
+
+# Decisions (and context explanation)
+timeline_decision_features = [
+    "Moral alignment system (good/neutral/evil)",
+    "Consequence ripple effect (choices affect multiple future events)",
+    "Timed decisions (limited time to choose)",
+    "Hidden choices (unlocked by specific conditions)",
+    "Context-sensitive options (choices change based on character stats/items)",
+    "Dialogue trees with branching paths",
+    "Risk/reward choices (high risk, high reward options)",
+    "Character personality development through choices",
+    "Relationship-altering decisions",
+    "Story-altering key decisions",
+    "Multi-step decision processes",
+    "Decision journals (recap of major choices)",
+    "Faction-based decisions",
+    "Profession or class-specific choices",
+    "Ethical dilemmas with no clear 'right' answer"
+]
+
+# Nested Sections
+timeline_nested_section_ideas = [
+    "Multilevel dungeons",
+    "City districts with sub-locations",
+    "Planetary system with multiple explorable planets",
+    "Dreamworld with nested dreams",
+    "Time periods (past, present, future) with sub-events",
+    "Parallel universes with alternate versions of locations",
+    "Miniature worlds inside objects",
+    "Body systems (for a medical game or body-exploration adventure)",
+    "Nested memories or flashbacks",
+    "Virtual reality levels within the game world",
+    "Dimensional rifts with sub-dimensions",
+    "Organizational hierarchies (e.g., crime family structure)",
+    "Ecosystem layers (e.g., forest canopy, understory, forest floor)",
+    "Nested puzzles (solving one unlocks another)",
+    "Matryoshka doll-like structures with hidden secrets"
+]
+
+# Media (Especially to affect decisions)
+timeline_media_integration = [
+    "Dynamic background music changing with mood/tension",
+    "Character portrait changes reflecting emotional states",
+    "Environmental sound effects influencing player choices",
+    "Visual cues (color schemes, lighting) hinting at outcomes",
+    "Cinematic cutscenes revealing crucial information",
+    "Interactive maps updating with player discoveries",
+    "Voice acting with tone influencing trustworthiness",
+    "Symbolic imagery foreshadowing consequences",
+    "Augmented reality overlays providing decision hints",
+    "Character theme motifs indicating importance of NPCs",
+    "Weather effects reflecting story mood",
+    "Dream sequences visualizing potential outcomes",
+    "Time-lapse visuals showing long-term effects of choices",
+    "Split-screen effects for simultaneous events",
+    "Flashback scenes providing context for decisions"
+]
+
+# Replayability (GTA and Tekken type mechanics in text form)
+timeline_replayability_features = [
+    "Multiple character origins with unique storylines",
+    "Randomized side quests and events",
+    "Unlockable skills and abilities for subsequent playthroughs",
+    "New Game+ mode with increased difficulty and rewards",
+    "Alternative routes to objectives",
+    "Hidden endings based on specific choices or achievements",
+    "Character customization affecting story and gameplay",
+    "Faction system with different allegiances each playthrough",
+    "Time-based events that change on each playthrough",
+    "Roguelike elements (permadeath, procedural generation)",
+    "Challenge modes (speed runs, minimalist runs)",
+    "Branching skill trees with mutually exclusive paths",
+    "Collectibles that unlock new content or lore",
+    "Dynamic world events that evolve differently each time",
+    "Secret characters or modes to unlock"
+]
+
+# Theme integration (Modified Variables that affect UI or config order)
+timeline_theme_integration = [
+    "Dynamic UI color schemes based on location or story progress",
+    "Font styles changing with different eras or dimensions",
+    "Adaptive music system reflecting current theme",
+    "UI element shapes and designs matching the current environment",
+    "Loading screen tips and lore adapted to current theme",
+    "Cursor or pointer design changing with themes",
+    "Theme-specific animations for transitions between scenes",
+    "Variable text descriptors (e.g., 'cold' vs 'scorching' based on environment)",
+    "Themed icons for inventory and skills",
+    "Background textures or patterns reflecting current setting",
+    "Character portrait frames styled to match the theme",
+    "Theme-specific sound effects for UI interactions",
+    "Adaptive difficulty adjustments based on chosen theme",
+    "Mini-map or navigation elements styled to fit the theme",
+    "Theme-specific filters or overlays for the game view"
+]
+
 # GTA Heists - Same Map with overlapping branching narratives
 gta_heist_inspired = [
     "Multiple missions/quests available in the same location",