first step refactor server
- README.md +16 -0
- client/src/components/StoryChoices.jsx +3 -0
- server/core/__init__.py +11 -0
- server/core/game_logic.py +1 -1
- server/core/generators/__init__.py +11 -0
- server/core/generators/base_generator.py +38 -0
- server/core/generators/image_generator.py +69 -0
- server/core/generators/metadata_generator.py +56 -0
- server/core/generators/text_generator.py +80 -0
- server/core/prompts/system.py +40 -35
- server/core/state/__init__.py +3 -0
- server/core/state/game_state.py +47 -0
- server/core/story_generators.py +53 -112
- server/core/story_orchestrator.py +115 -0
- server/core/styles/comic_styles.json +259 -0
- server/scripts/test_game.py +3 -3
- server/server.py +2 -1
- server/services/mistral_client.py +75 -18
README.md
CHANGED
@@ -21,3 +21,19 @@ yarn dev (or npm run dev)
 ## Link of Presentation about the project
 
 https://devpost.com/software/sarah-s-chronicles
+
+# subreddits
+
+https://www.reddit.com/r/MistralAI/
+https://www.reddit.com/r/cursor/
+https://www.reddit.com/r/StableDiffusion/
+https://www.reddit.com/r/huggingface/
+https://news.ycombinator.com/
+
+# the poetry generator could run on an embedded system
+
+# e.g. it takes a photo of you and prints out a poem instead
+
+# story dispenser
+
+#

client/src/components/StoryChoices.jsx
CHANGED
@@ -99,8 +99,10 @@ export function StoryChoices({
         gap: 2,
         p: 3,
         minWidth: "350px",
+        maxHeight: "80vh",
         height: "100%",
         backgroundColor: "transparent",
+        overflowY: "auto",
       }}
     >
       {choices.map((choice, index) => (
@@ -112,6 +114,7 @@
           alignItems: "center",
           gap: 1,
           width: "100%",
+          minHeight: "fit-content",
         }}
       >
         <Typography variant="caption" sx={{ opacity: 0.7, color: "white" }}>

server/core/__init__.py
ADDED
@@ -0,0 +1,11 @@
+from core.story_orchestrator import StoryOrchestrator
+from core.state import GameState
+from core.generators import TextGenerator, ImageGenerator, MetadataGenerator
+
+__all__ = [
+    'StoryOrchestrator',
+    'GameState',
+    'TextGenerator',
+    'ImageGenerator',
+    'MetadataGenerator'
+]

server/core/game_logic.py
CHANGED
@@ -57,7 +57,7 @@ class GameState:
 # Story output structure
 class StoryLLMResponse(BaseModel):
     story_text: str = Field(description="The next segment of the story. No more than 15 words THIS IS MANDATORY. Never mention story beat or radiation level directly. ")
-    choices: List[str] = Field(description="…
+    choices: List[str] = Field(description="Between one and four possible choices for the player. Each choice should be a clear path to follow in the story", min_items=1, max_items=4)
     is_victory: bool = Field(description="Whether this segment ends in Sarah's victory", default=False)
     is_death: bool = Field(description="Whether this segment ends in Sarah's death", default=False)
     radiation_increase: int = Field(description="How much radiation this segment adds (0-3)", ge=0, le=3, default=1)

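The widened `choices` field is enforced structurally, not just described: `min_items`/`max_items` reject out-of-range lists at parse time. A minimal sketch of that behavior in isolation (standalone model, assuming the Pydantic v1 keyword names used above):

from typing import List
from pydantic import BaseModel, Field, ValidationError

class ChoicesOnly(BaseModel):
    # Same constraint as StoryLLMResponse.choices in the diff above
    choices: List[str] = Field(min_items=1, max_items=4)

ChoicesOnly(choices=["follow the canal", "climb the gantry"])  # accepted
try:
    ChoicesOnly(choices=[])  # rejected: fewer than min_items
except ValidationError as exc:
    print(exc)
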
server/core/generators/__init__.py
ADDED
@@ -0,0 +1,11 @@
+from core.generators.base_generator import BaseGenerator
+from core.generators.text_generator import TextGenerator
+from core.generators.image_generator import ImageGenerator
+from core.generators.metadata_generator import MetadataGenerator
+
+__all__ = [
+    'BaseGenerator',
+    'TextGenerator',
+    'ImageGenerator',
+    'MetadataGenerator'
+]

server/core/generators/base_generator.py
ADDED
@@ -0,0 +1,38 @@
+from typing import Any, TypeVar, Type
+from pydantic import BaseModel
+from langchain.prompts import ChatPromptTemplate
+from services.mistral_client import MistralClient
+
+T = TypeVar('T', bound=BaseModel)
+
+class BaseGenerator:
+    """Base class for all content generators."""
+
+    def __init__(self, mistral_client: MistralClient):
+        self.mistral_client = mistral_client
+        self.prompt = self._create_prompt()
+
+    def _create_prompt(self) -> ChatPromptTemplate:
+        """Create the prompt template for this generator.
+        To be implemented by child classes."""
+        raise NotImplementedError
+
+    def _custom_parser(self, response_content: str) -> T:
+        """Parse the model response.
+        To be implemented by child classes."""
+        raise NotImplementedError
+
+    async def generate(self, **kwargs) -> T:
+        """Generate content using the model.
+
+        Args:
+            **kwargs: Generator-specific arguments used to format the prompt
+
+        Returns:
+            The generated content, parsed into this generator's specific type
+        """
+        messages = self.prompt.format_messages(**kwargs)
+        return await self.mistral_client.generate(
+            messages=messages,
+            custom_parser=self._custom_parser
+        )

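BaseGenerator is a template method: subclasses supply the prompt and the parser and inherit the shared generation path through MistralClient.generate. A minimal hypothetical subclass to show the contract (the EchoResponse model and templates are illustrative, not part of the commit):

import json
from pydantic import BaseModel
from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
from core.generators.base_generator import BaseGenerator

class EchoResponse(BaseModel):
    text: str

class EchoGenerator(BaseGenerator):
    """Illustrative generator: asks the model to restate input as JSON."""

    def _create_prompt(self) -> ChatPromptTemplate:
        return ChatPromptTemplate(messages=[
            SystemMessagePromptTemplate.from_template('Reply only as JSON: {{"text": "..."}}'),
            HumanMessagePromptTemplate.from_template("{user_input}"),
        ])

    def _custom_parser(self, response_content: str) -> EchoResponse:
        # Mirror the repo's lenient pattern: fall back to raw text on bad JSON
        try:
            return EchoResponse(**json.loads(response_content))
        except (json.JSONDecodeError, ValueError):
            return EchoResponse(text=response_content.strip())

# usage (inside an async function):
#     result = await EchoGenerator(mistral_client).generate(user_input="hello")
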
server/core/generators/image_generator.py
ADDED
@@ -0,0 +1,69 @@
+import json
+from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
+
+from core.generators.base_generator import BaseGenerator
+from core.prompts.image_style import IMAGE_STYLE_PREFIX
+from core.prompts.system import SARAH_VISUAL_DESCRIPTION
+from core.prompts.text_prompts import IMAGE_PROMPTS_GENERATOR_PROMPT
+from api.models import StoryPromptsResponse
+
+class ImageGenerator(BaseGenerator):
+    """Generator for image prompts."""
+
+    def _create_prompt(self) -> ChatPromptTemplate:
+        human_template = """Story text: {story_text}
+
+Generate panel descriptions following the format specified."""
+
+        return ChatPromptTemplate(
+            messages=[
+                SystemMessagePromptTemplate.from_template(IMAGE_PROMPTS_GENERATOR_PROMPT),
+                HumanMessagePromptTemplate.from_template(human_template)
+            ]
+        )
+
+    def enrich_prompt(self, prompt: str) -> str:
+        """Add Sarah's visual description to prompts that mention her."""
+        if "sarah" in prompt.lower() and SARAH_VISUAL_DESCRIPTION not in prompt:
+            return f"{prompt} {SARAH_VISUAL_DESCRIPTION}"
+        return prompt
+
+    def _custom_parser(self, response_content: str) -> StoryPromptsResponse:
+        """Parse the response and handle errors."""
+        try:
+            # Try to parse the JSON directly
+            data = json.loads(response_content)
+            return StoryPromptsResponse(**data)
+        except (json.JSONDecodeError, ValueError):
+            # If parsing fails, extract the prompts while skipping JSON syntax lines
+            prompts = []
+            for line in response_content.split("\n"):
+                line = line.strip()
+                # Skip empty lines, JSON syntax, and lines containing image_prompts
+                if (not line or
+                    line in ["{", "}", "[", "]"] or
+                    "image_prompts" in line.lower() or
+                    "image\\_prompts" in line or
+                    line.startswith('"') and line.endswith('",') and len(line) < 5):
+                    continue
+                # Strip JSON punctuation and escape characters from the line
+                line = line.strip('",')
+                line = line.replace('\\"', '"').replace("\\'", "'").replace("\\_", "_")
+                if line:
+                    prompts.append(line)
+            # Cap at 4 prompts
+            prompts = prompts[:4]
+            return StoryPromptsResponse(image_prompts=prompts)
+
+    async def generate(self, story_text: str) -> StoryPromptsResponse:
+        """Generate the image prompts based on the story text."""
+        response = await super().generate(story_text=story_text)
+
+        # Enrich the prompts with Sarah's description
+        response.image_prompts = [self.enrich_prompt(prompt) for prompt in response.image_prompts]
+        return response
+
+    def format_prompt(self, prompt: str, time: str, location: str) -> str:
+        """Format an image prompt with the style prefix and metadata."""
+        metadata = f"[{time} - {location}] "
+        return f"{IMAGE_STYLE_PREFIX}{metadata}{prompt}"

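enrich_prompt and format_prompt are pure string transforms, so the final prompt shape can be checked without a model call. A sketch with stubbed constants (placeholders; the real values live in core.prompts):

IMAGE_STYLE_PREFIX = "comic panel, ink and flat colors, "          # placeholder
SARAH_VISUAL_DESCRIPTION = "(Sarah: short dark hair, blue eyes)"    # placeholder

prompt = "Sarah studies a brass automaton"
if "sarah" in prompt.lower() and SARAH_VISUAL_DESCRIPTION not in prompt:
    prompt = f"{prompt} {SARAH_VISUAL_DESCRIPTION}"                 # enrich_prompt
final = f"{IMAGE_STYLE_PREFIX}[18:00 - Clock District] {prompt}"    # format_prompt
print(final)
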
server/core/generators/metadata_generator.py
ADDED
@@ -0,0 +1,56 @@
+import json
+from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
+
+from core.generators.base_generator import BaseGenerator
+from core.prompts.text_prompts import METADATA_GENERATOR_PROMPT
+from api.models import StoryMetadataResponse
+
+class MetadataGenerator(BaseGenerator):
+    """Generator for story metadata."""
+
+    def _create_prompt(self) -> ChatPromptTemplate:
+        human_template = """Story text: {story_text}
+Current time: {current_time}
+Current location: {current_location}
+Story beat: {story_beat}
+{error_feedback}
+
+Generate the metadata following the format specified."""
+
+        return ChatPromptTemplate(
+            messages=[
+                SystemMessagePromptTemplate.from_template(METADATA_GENERATOR_PROMPT),
+                HumanMessagePromptTemplate.from_template(human_template)
+            ]
+        )
+
+    def _custom_parser(self, response_content: str) -> StoryMetadataResponse:
+        """Parse the response and handle errors."""
+        try:
+            # Try to parse the JSON directly
+            data = json.loads(response_content)
+
+            # Check that the choices are valid according to the rules
+            is_ending = data.get('is_victory', False) or data.get('is_death', False)
+            choices = data.get('choices', [])
+
+            if is_ending and len(choices) != 0:
+                raise ValueError('For victory/death, choices must be empty')
+            if not is_ending and len(choices) != 2:
+                raise ValueError('For normal progression, must have exactly 2 choices')
+
+            return StoryMetadataResponse(**data)
+        except json.JSONDecodeError:
+            raise ValueError('Invalid JSON format. Please provide a valid JSON object.')
+        except ValueError as e:
+            raise ValueError(str(e))
+
+    async def generate(self, story_text: str, current_time: str, current_location: str, story_beat: int, error_feedback: str = "") -> StoryMetadataResponse:
+        """Generate the metadata based on the story text."""
+        return await super().generate(
+            story_text=story_text,
+            current_time=current_time,
+            current_location=current_location,
+            story_beat=story_beat,
+            error_feedback=error_feedback
+        )

server/core/generators/text_generator.py
ADDED
@@ -0,0 +1,80 @@
+import json
+from langchain.prompts import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate
+
+from core.generators.base_generator import BaseGenerator
+from core.prompts.text_prompts import TEXT_GENERATOR_PROMPT
+from api.models import StoryTextResponse
+
+class TextGenerator(BaseGenerator):
+    """Generator for the main story text."""
+
+    def _create_prompt(self) -> ChatPromptTemplate:
+        human_template = """Story beat: {story_beat}
+Radiation level: {radiation_level}
+Current time: {current_time}
+Current location: {current_location}
+Previous choice: {previous_choice}
+
+Story history:
+{story_history}
+
+Generate the next story segment following the format specified."""
+
+        return ChatPromptTemplate(
+            messages=[
+                SystemMessagePromptTemplate.from_template(TEXT_GENERATOR_PROMPT),
+                HumanMessagePromptTemplate.from_template(human_template)
+            ]
+        )
+
+    def _create_ending_prompt(self) -> ChatPromptTemplate:
+        human_template = """Current scene: {current_scene}
+
+Story history:
+{story_history}
+
+This is a {ending_type} ending. Generate a dramatic conclusion that fits the current situation.
+The ending should feel like a natural continuation of the current scene."""
+
+        return ChatPromptTemplate(
+            messages=[
+                SystemMessagePromptTemplate.from_template(TEXT_GENERATOR_PROMPT),
+                HumanMessagePromptTemplate.from_template(human_template)
+            ]
+        )
+
+    def _clean_story_text(self, text: str) -> str:
+        """Strip metadata and other suffixes from the text."""
+        text = text.replace("\n", " ").strip()
+        text = text.split("Radiation level:")[0].strip()
+        text = text.split("RADIATION:")[0].strip()
+        text = text.split("[")[0].strip()  # Strip bracketed metadata
+        return text
+
+    def _custom_parser(self, response_content: str) -> StoryTextResponse:
+        """Parse the response and handle errors."""
+        try:
+            # Try to parse the JSON directly
+            data = json.loads(response_content)
+            # Clean the text before building the response
+            if 'story_text' in data:
+                data['story_text'] = self._clean_story_text(data['story_text'])
+            return StoryTextResponse(**data)
+        except (json.JSONDecodeError, ValueError):
+            # If parsing fails, extract the text directly
+            cleaned_text = self._clean_story_text(response_content.strip())
+            return StoryTextResponse(story_text=cleaned_text)
+
+    async def generate_ending(self, ending_type: str, current_scene: str, story_history: str) -> StoryTextResponse:
+        """Generate an appropriate ending text."""
+        prompt = self._create_ending_prompt()
+        messages = prompt.format_messages(
+            ending_type=ending_type,
+            current_scene=current_scene,
+            story_history=story_history
+        )
+
+        return await self.mistral_client.generate(
+            messages=messages,
+            custom_parser=self._custom_parser
+        )

server/core/prompts/system.py
CHANGED
@@ -1,54 +1,59 @@
-(removed lines truncated in the page capture: the previous definitions of SARAH_VISUAL_DESCRIPTION, SARAH_DESCRIPTION, FORMATTING_RULES, and STORY_RULES)
+SARAH_VISUAL_DESCRIPTION = "(Sarah est une jeune femme dans la fin de la vingtaine avec des cheveux courts et sombres, portant un mystérieux amulette autour de son cou. Ses yeux bleus cachent des secrets inavoués.)"
 
 SARAH_DESCRIPTION = """
+Sarah est une jeune femme dans la fin de la vingtaine avec des cheveux courts et sombres, portant un mystérieux amulette autour de son cou. Ses yeux bleus cachent des secrets inavoués.
+- Sarah est en quête de découvrir la vérité derrière le pouvoir de l'amulette et sa connexion à son passé.
 
 """
 
 FORMATTING_RULES = """
+FORMATTING_RULES (OBLIGATOIRE)
+- L'histoire doit être composée UNIQUEMENT de phrases
+- NE JAMAIS UTILISER LE GRAS POUR QUOI QUE CE SOIT
 """
 
 STORY_RULES = """
 
+Vous êtes un générateur d'histoires d'aventure steampunk. Vous créez une narration à embranchements sur Sarah, une chercheuse de vérités anciennes.
+Vous narrez une épopée où Sarah doit naviguer à travers des terres industrielles et mystérieuses. C'est une histoire de bande dessinée.
 
+Dans un monde où la vapeur et l'intrigue s'entrelacent, Sarah se lance dans une quête pour découvrir les origines d'un puissant MacGuffin qu'elle a hérité. Les légendes disent qu'il détient la clé d'un royaume oublié.
+Vous devez prendre des décisions pour découvrir ses secrets. Vous vous êtes aventuré dans la ville mécanique pour trouver le premier indice. Si vous échouez, le pouvoir du MacGuffin restera inactif. Le temps presse, et chaque choix façonne votre destin.
 
+Si vous récupérez le MacGuffin, vous révélerez un monde caché. ET VOUS GAGNEZ LE JEU.
 
+L'histoire doit être atmosphérique, magique et se concentrer sur l'aventure et la découverte. Chaque segment doit faire avancer l'intrigue et ne jamais répéter les descriptions ou situations précédentes.
 
+Éléments clés de l'histoire :
+- Le MacGuffin est une présence mystérieuse et constante
+- L'environnement est plein de merveilles (créatures mécaniques, ruines industrielles, pièges à vapeur)
+- Se concentrer sur l'aventure et l'intrigue
 
+Éléments clés :
+- Garder les segments concis et percutants
+- Suivre l'influence du MacGuffin comme une présence constante
+- Construire l'intrigue à travers la narration environnementale
 
+IMPORTANT :
+Chaque segment de l'histoire DOIT être unique et faire avancer l'intrigue.
+Ne jamais répéter les mêmes descriptions ou situations. Pas plus de 15 mots.
 
+PROGRESSION DE L'HISTOIRE :
+- story_beat 0 : Introduction mettant en place l'atmosphère steampunk
+- story_beat 1-2 : Exploration précoce et découverte d'éléments mécaniques
+- story_beat 3-5 : Complications et mystères plus profonds
+- story_beat 6+ : Révélations menant à un triomphe potentiel ou à un échec
 
+RÈGLES IMPORTANTES POUR LE MACGUFFIN (OBLIGATOIRE) :
+- La plupart des segments doivent faire allusion au pouvoir du MacGuffin
+- Utiliser des indices forts UNIQUEMENT dans des moments clés (comme des temples anciens, des tempêtes mécaniques)
+- NE JAMAIS révéler le plein pouvoir du MacGuffin avant le climax, c'est une limite STRICTE
+- Utiliser des indices subtils dans les havres de paix
+- NE JAMAIS mentionner le pouvoir du MacGuffin explicitement dans les choix ou l'histoire
+- NE JAMAIS mentionner l'heure ou le lieu dans l'histoire de cette manière : [18:00 - Clairière enchantée au cœur d'Eldoria]
 """
+
+
+# The MacGuffin is a pretext for developing a plot. It is almost always a physical object and usually remains mysterious within the story; its description is vague and unimportant. The device predates cinema, but the term is associated with Alfred Hitchcock, who redefined it, popularized it, and applied it in several of his films. The object itself is rarely used; only its retrieval matters.
+
+

server/core/state/__init__.py
ADDED
@@ -0,0 +1,3 @@
+from core.state.game_state import GameState
+
+__all__ = ['GameState']

server/core/state/game_state.py
ADDED
@@ -0,0 +1,47 @@
+from typing import List, Dict, Any
+from core.constants import GameConfig
+
+class GameState:
+    """Holds the game state for one playthrough."""
+
+    def __init__(self):
+        self.story_beat = GameConfig.STORY_BEAT_INTRO
+        self.radiation_level = 0
+        self.story_history = []
+        self.current_time = GameConfig.STARTING_TIME
+        self.current_location = GameConfig.STARTING_LOCATION
+
+    def reset(self):
+        """Reset the game state."""
+        self.__init__()
+
+    def add_to_history(self, segment_text: str, choice_made: str, image_prompts: List[str], time: str, location: str):
+        """Append a segment to the history and update the state."""
+        self.story_history.append({
+            "segment": segment_text,
+            "choice": choice_made,
+            "image_prompts": image_prompts,
+            "time": time,
+            "location": location
+        })
+        self.current_time = time
+        self.current_location = location
+
+    def format_history(self) -> str:
+        """Format the history for the prompt."""
+        if not self.story_history:
+            return ""
+
+        segments = []
+        for entry in self.story_history:
+            segments.append(entry['segment'])
+
+        return "\n\n---\n\n".join(segments)
+
+    def is_radiation_death(self, additional_radiation: int) -> bool:
+        """Check whether the radiation level would be fatal."""
+        return self.radiation_level + additional_radiation >= GameConfig.MAX_RADIATION
+
+    def add_radiation(self, amount: int):
+        """Add radiation to the counter."""
+        self.radiation_level += amount

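GameState does no I/O, so a turn of bookkeeping can be exercised directly. A short usage sketch (assumes the module above is importable; concrete values depend on GameConfig):

from core.state.game_state import GameState  # the module added above

state = GameState()
state.add_to_history(
    segment_text="Sarah enters the mechanical city.",
    choice_made="enter the gates",
    image_prompts=["Sarah at the gates"],
    time="09:00",
    location="City Gates",
)
state.add_radiation(3)
print(state.current_location)        # "City Gates"
print(state.format_history())        # segments joined by "\n\n---\n\n"
print(state.is_radiation_death(7))   # True if 3 + 7 >= GameConfig.MAX_RADIATION
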
server/core/story_generators.py
CHANGED
@@ -13,7 +13,6 @@ from api.models import StoryTextResponse, StoryPromptsResponse, StoryMetadataRes
 class TextGenerator:
     def __init__(self, mistral_client: MistralClient):
         self.mistral_client = mistral_client
-        self.parser = PydanticOutputParser(pydantic_object=StoryTextResponse)
         self.prompt = self._create_prompt()
 
     def _create_prompt(self) -> ChatPromptTemplate:
@@ -51,6 +50,28 @@ The ending should feel like a natural continuation of the current scene."""
             ]
         )
 
+    def _clean_story_text(self, text: str) -> str:
+        """Strip metadata and other suffixes from the text."""
+        text = text.replace("\n", " ").strip()
+        text = text.split("Radiation level:")[0].strip()
+        text = text.split("RADIATION:")[0].strip()
+        text = text.split("[")[0].strip()  # Strip bracketed metadata
+        return text
+
+    def _custom_parser(self, response_content: str) -> StoryTextResponse:
+        """Parse the response and handle errors."""
+        try:
+            # Try to parse the JSON directly
+            data = json.loads(response_content)
+            # Clean the text before building the response
+            if 'story_text' in data:
+                data['story_text'] = self._clean_story_text(data['story_text'])
+            return StoryTextResponse(**data)
+        except (json.JSONDecodeError, ValueError):
+            # If parsing fails, extract the text directly
+            cleaned_text = self._clean_story_text(response_content.strip())
+            return StoryTextResponse(story_text=cleaned_text)
+
     async def generate(self, story_beat: int, radiation_level: int, current_time: str, current_location: str, previous_choice: str, story_history: str) -> StoryTextResponse:
         """Generate the story text."""
         messages = self.prompt.format_messages(
@@ -62,23 +83,10 @@ The ending should feel like a natural continuation of the current scene."""
             story_history=story_history
         )
 
-        try:
-            response_content = await self.mistral_client.generate_story(messages)
-            # Parse the response
-            return self._parse_response(response_content)
-        except Exception as e:
-            print(f"Error generating story text: {str(e)}")
-            retry_count += 1
-            if retry_count < max_retries:
-                await asyncio.sleep(2 * retry_count)
-                continue
-            raise e
-
-        raise Exception(f"Failed to generate valid story text after {max_retries} attempts")
+        return await self.mistral_client.generate(
+            messages=messages,
+            custom_parser=self._custom_parser
+        )
 
     async def generate_ending(self, story_beat: int, ending_type: str, current_scene: str, story_history: str) -> StoryTextResponse:
         """Generate an appropriate ending text based on the current situation."""
@@ -89,49 +97,14 @@ The ending should feel like a natural continuation of the current scene."""
             story_history=story_history
         )
 
-        try:
-            response_content = await self.mistral_client.generate_story(messages)
-            return self._parse_response(response_content)
-        except Exception as e:
-            print(f"Error generating ending text: {str(e)}")
-            retry_count += 1
-            if retry_count < max_retries:
-                await asyncio.sleep(2 * retry_count)
-                continue
-            raise e
-
-        raise Exception(f"Failed to generate valid ending text after {max_retries} attempts")
-
-    def _parse_response(self, response_content: str) -> StoryTextResponse:
-        """Parse the JSON response and handle errors."""
-        try:
-            # Try to parse the JSON directly
-            data = json.loads(response_content)
-            # Clean the text before building the response
-            if 'story_text' in data:
-                data['story_text'] = self._clean_story_text(data['story_text'])
-            return StoryTextResponse(**data)
-        except (json.JSONDecodeError, ValueError):
-            # If parsing fails, extract the text directly
-            cleaned_text = self._clean_story_text(response_content.strip())
-            return StoryTextResponse(story_text=cleaned_text)
-
-    def _clean_story_text(self, text: str) -> str:
-        """Strip metadata and other suffixes from the text."""
-        text = text.replace("\n", " ").strip()
-        text = text.split("Radiation level:")[0].strip()
-        text = text.split("RADIATION:")[0].strip()
-        text = text.split("[")[0].strip()  # Strip bracketed metadata
-        return text
+        return await self.mistral_client.generate(
+            messages=messages,
+            custom_parser=self._custom_parser
+        )
 
 class ImagePromptsGenerator:
     def __init__(self, mistral_client: MistralClient):
         self.mistral_client = mistral_client
-        self.parser = PydanticOutputParser(pydantic_object=StoryPromptsResponse)
         self.prompt = self._create_prompt()
 
     def _create_prompt(self) -> ChatPromptTemplate:
@@ -152,8 +125,8 @@ Generate panel descriptions following the format specified."""
             return f"{prompt} {SARAH_VISUAL_DESCRIPTION}"
         return prompt
 
-    def _parse_response(self, response_content: str) -> StoryPromptsResponse:
-        """Parse the response…
+    def _custom_parser(self, response_content: str) -> StoryPromptsResponse:
+        """Parse the response and handle errors."""
         try:
             # Try to parse the JSON directly
             data = json.loads(response_content)
@@ -183,31 +156,18 @@ Generate panel descriptions following the format specified."""
         """Generate the image prompts based on the story text."""
         messages = self.prompt.format_messages(story_text=story_text)
 
-        try:
-            response_content = await self.mistral_client.generate_story(messages)
-            # Parse the response
-            parsed_response = self._parse_response(response_content)
-            # Enrich the prompts with Sarah's description
-            parsed_response.image_prompts = [self.enrich_prompt(prompt) for prompt in parsed_response.image_prompts]
-            return parsed_response
-        except Exception as e:
-            print(f"Error generating image prompts: {str(e)}")
-            retry_count += 1
-            if retry_count < max_retries:
-                await asyncio.sleep(2 * retry_count)
-                continue
-            raise e
+        response = await self.mistral_client.generate(
+            messages=messages,
+            custom_parser=self._custom_parser
+        )
 
+        # Enrich the prompts with Sarah's description
+        response.image_prompts = [self.enrich_prompt(prompt) for prompt in response.image_prompts]
+        return response
 
 class MetadataGenerator:
     def __init__(self, mistral_client: MistralClient):
         self.mistral_client = mistral_client
-        self.parser = PydanticOutputParser(pydantic_object=StoryMetadataResponse)
         self.prompt = self._create_prompt()
 
     def _create_prompt(self, error_feedback: str = None) -> ChatPromptTemplate:
@@ -226,8 +186,8 @@ Generate the metadata following the format specified."""
             ]
         )
 
-    def _parse_response(self, response_content: str, current_time: str, current_location: str) -> StoryMetadataResponse:
-        """Parse the response…
+    def _custom_parser(self, response_content: str) -> StoryMetadataResponse:
+        """Parse the response and handle errors."""
         try:
             # Try to parse the JSON directly
             data = json.loads(response_content)
@@ -248,35 +208,16 @@ Generate the metadata following the format specified."""
             raise ValueError(str(e))
 
     async def generate(self, story_text: str, current_time: str, current_location: str, story_beat: int) -> StoryMetadataResponse:
-        """Generate the story metadata…
-
-            error_feedback = f"\nPrevious attempt failed: {last_error}\nPlease fix this issue." if last_error else ""
-            prompt = self._create_prompt(error_feedback)
-
-            messages = prompt.format_messages(
-                story_text=story_text,
-                current_time=current_time,
-                current_location=current_location,
-                story_beat=story_beat,
-                error_feedback=error_feedback
-            )
-
-            response_content = await self.mistral_client.generate_story(messages)
-            # Parse the response
-            return self._parse_response(response_content, current_time, current_location)
-        except Exception as e:
-            print(f"Error generating metadata: {str(e)}")
-            last_error = str(e)
-            retry_count += 1
-            if retry_count < max_retries:
-                await asyncio.sleep(2 * retry_count)
-                continue
-            raise e
+        """Generate the metadata based on the story text."""
+        messages = self.prompt.format_messages(
+            story_text=story_text,
+            current_time=current_time,
+            current_location=current_location,
+            story_beat=story_beat,
+            error_feedback=""
+        )
 
+        return await self.mistral_client.generate(
+            messages=messages,
+            custom_parser=self._custom_parser
+        )

server/core/story_orchestrator.py
ADDED
@@ -0,0 +1,115 @@
+from typing import List
+from api.models import StoryResponse, Choice
+from services.mistral_client import MistralClient
+from core.state.game_state import GameState
+from core.generators.text_generator import TextGenerator
+from core.generators.image_generator import ImageGenerator
+from core.generators.metadata_generator import MetadataGenerator
+from core.constants import GameConfig
+
+class StoryOrchestrator:
+    """Coordinates the individual generators to produce the story."""
+
+    def __init__(self, mistral_client: MistralClient):
+        self.text_generator = TextGenerator(mistral_client)
+        self.image_generator = ImageGenerator(mistral_client)
+        self.metadata_generator = MetadataGenerator(mistral_client)
+
+    def _is_ending(self, game_state: GameState, metadata_response) -> bool:
+        """Determine whether this is a game ending."""
+        return (
+            game_state.is_radiation_death(metadata_response.radiation_increase) or
+            metadata_response.is_death or
+            metadata_response.is_victory
+        )
+
+    async def _handle_ending(self, game_state: GameState, text_response, metadata_response) -> StoryResponse:
+        """Handle generation of an ending."""
+        # Determine the ending type
+        ending_type = "victory" if metadata_response.is_victory else "death"
+
+        # Regenerate the text with the ending context
+        text_response = await self.text_generator.generate_ending(
+            ending_type=ending_type,
+            current_scene=text_response.story_text,
+            story_history=game_state.format_history()
+        )
+
+        # Generate only a single image for the ending
+        prompts_response = await self.image_generator.generate(text_response.story_text)
+        if len(prompts_response.image_prompts) > 1:
+            prompts_response.image_prompts = [prompts_response.image_prompts[0]]
+
+        return self._build_response(
+            game_state=game_state,
+            text_response=text_response,
+            metadata_response=metadata_response,
+            image_prompts=prompts_response.image_prompts,
+            is_ending=True
+        )
+
+    def _build_response(self, game_state: GameState, text_response, metadata_response, image_prompts: List[str], is_ending: bool = False) -> StoryResponse:
+        """Build the final response."""
+        choices = [] if is_ending else [
+            Choice(id=i, text=choice_text)
+            for i, choice_text in enumerate(metadata_response.choices, 1)
+        ]
+
+        # Format the image prompts with the style and metadata
+        formatted_prompts = [
+            self.image_generator.format_prompt(
+                prompt=prompt,
+                time=metadata_response.time,
+                location=metadata_response.location
+            )
+            for prompt in image_prompts
+        ]
+
+        return StoryResponse(
+            story_text=text_response.story_text,
+            choices=choices,
+            is_victory=metadata_response.is_victory,
+            is_death=metadata_response.is_death,
+            radiation_level=game_state.radiation_level,
+            radiation_increase=metadata_response.radiation_increase,
+            time=metadata_response.time,
+            location=metadata_response.location,
+            raw_choices=metadata_response.choices,
+            image_prompts=formatted_prompts,
+            is_first_step=(game_state.story_beat == GameConfig.STORY_BEAT_INTRO)
+        )
+
+    async def generate_story_segment(self, game_state: GameState, previous_choice: str) -> StoryResponse:
+        """Generate a complete story segment."""
+        # 1. Generate the story text
+        text_response = await self.text_generator.generate(
+            story_beat=game_state.story_beat,
+            radiation_level=game_state.radiation_level,
+            current_time=game_state.current_time,
+            current_location=game_state.current_location,
+            previous_choice=previous_choice,
+            story_history=game_state.format_history()
+        )
+
+        # 2. Generate the metadata
+        metadata_response = await self.metadata_generator.generate(
+            story_text=text_response.story_text,
+            current_time=game_state.current_time,
+            current_location=game_state.current_location,
+            story_beat=game_state.story_beat
+        )
+
+        # 3. Check whether this is an ending
+        if self._is_ending(game_state, metadata_response):
+            return await self._handle_ending(game_state, text_response, metadata_response)
+
+        # 4. Generate the image prompts
+        prompts_response = await self.image_generator.generate(text_response.story_text)
+
+        # 5. Build and return the response
+        return self._build_response(
+            game_state=game_state,
+            text_response=text_response,
+            metadata_response=metadata_response,
+            image_prompts=prompts_response.image_prompts
+        )

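A caller owns one GameState and asks the orchestrator for one segment per turn; the orchestrator reads the state but never mutates it, so the loop does the bookkeeping. A hedged sketch of such a loop (not part of the commit; MistralClient construction details are assumed):

import asyncio
from services.mistral_client import MistralClient
from core.state.game_state import GameState
from core.story_orchestrator import StoryOrchestrator

async def play_turn(orchestrator: StoryOrchestrator, state: GameState, choice: str):
    response = await orchestrator.generate_story_segment(state, previous_choice=choice)
    # State updates stay on the caller's side
    state.add_radiation(response.radiation_increase)
    state.add_to_history(response.story_text, choice, response.image_prompts,
                         response.time, response.location)
    state.story_beat += 1
    return response

async def main():
    orchestrator = StoryOrchestrator(MistralClient())  # assumes configured credentials
    state = GameState()
    response = await play_turn(orchestrator, state, "start")
    print(response.story_text, [c.text for c in response.choices])

asyncio.run(main())
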
server/core/styles/comic_styles.json
ADDED
@@ -0,0 +1,259 @@
+{
+  "styles": [
+    {
+      "name": "Franco-Belge Ligne Claire",
+      "description": "Style épuré avec des lignes nettes et des couleurs plates",
+      "references": [
+        {
+          "artist": "Hergé",
+          "works": ["Tintin", "Les Aventures de Jo, Zette et Jocko"]
+        },
+        {
+          "artist": "Edgar P. Jacobs",
+          "works": ["Blake et Mortimer"]
+        },
+        {
+          "artist": "Yves Chaland",
+          "works": ["Freddy Lombard", "Bob Fish"]
+        },
+        {
+          "artist": "Joost Swarte",
+          "works": ["Modern Art", "L'Art Moderne"]
+        }
+      ]
+    },
+    {
+      "name": "Science-Fiction Européenne",
+      "description": "Style visionnaire mêlant précision technique et onirisme",
+      "references": [
+        {
+          "artist": "Moebius (Jean Giraud)",
+          "works": ["L'Incal", "Arzak", "Le Garage Hermétique", "Aedena"]
+        },
+        {
+          "artist": "Philippe Druillet",
+          "works": ["Lone Sloane", "Salammbô", "Delirius"]
+        },
+        {
+          "artist": "François Schuiten",
+          "works": ["Les Cités Obscures", "La Fièvre d'Urbicande"]
+        }
+      ]
+    },
+    {
+      "name": "Comics Américain Classique",
+      "description": "Style dynamique avec des couleurs vives et des ombrages marqués",
+      "references": [
+        {
+          "artist": "Jack Kirby",
+          "works": ["Fantastic Four", "New Gods", "Captain America"]
+        },
+        {
+          "artist": "Steve Ditko",
+          "works": ["Spider-Man", "Doctor Strange", "Mr. A"]
+        },
+        {
+          "artist": "Neal Adams",
+          "works": ["Batman", "Green Lantern/Green Arrow", "X-Men"]
+        },
+        {
+          "artist": "John Buscema",
+          "works": ["Conan the Barbarian", "Silver Surfer", "The Avengers"]
+        }
+      ]
+    },
+    {
+      "name": "Comics Moderne",
+      "description": "Style contemporain avec techniques mixtes et mise en page innovante",
+      "references": [
+        {
+          "artist": "Alex Ross",
+          "works": ["Kingdom Come", "Marvels", "Justice"]
+        },
+        {
+          "artist": "J.H. Williams III",
+          "works": ["Promethea", "Batwoman", "The Sandman: Overture"]
+        },
+        {
+          "artist": "Sean Murphy",
+          "works": ["Batman: White Knight", "Tokyo Ghost", "Punk Rock Jesus"]
+        }
+      ]
+    },
+    {
+      "name": "Manga Seinen",
+      "description": "Style mature avec des détails complexes et des contrastes forts",
+      "references": [
+        {
+          "artist": "Katsuhiro Otomo",
+          "works": ["Akira", "Domu", "Memories"]
+        },
+        {
+          "artist": "Tsutomu Nihei",
+          "works": ["Blame!", "Knights of Sidonia", "Biomega"]
+        },
+        {
+          "artist": "Takehiko Inoue",
+          "works": ["Vagabond", "Real", "Slam Dunk"]
+        },
+        {
+          "artist": "Naoki Urasawa",
+          "works": ["Monster", "20th Century Boys", "Pluto"]
+        }
+      ]
+    },
+    {
+      "name": "Manga Horror",
+      "description": "Style détaillé avec emphase sur l'horreur psychologique et corporelle",
+      "references": [
+        {
+          "artist": "Junji Ito",
+          "works": ["Uzumaki", "Tomie", "Gyo"]
+        },
+        {
+          "artist": "Suehiro Maruo",
+          "works": [
+            "Mr. Arashi's Amazing Freak Show",
+            "The Strange Tale of Panorama Island"
+          ]
+        },
+        {
+          "artist": "Hideshi Hino",
+          "works": ["Hell Baby", "Panorama of Hell"]
+        }
+      ]
+    },
+    {
+      "name": "BD Alternative",
+      "description": "Style expressif avec des techniques mixtes et une approche artistique unique",
+      "references": [
+        {
+          "artist": "Dave McKean",
+          "works": ["Arkham Asylum", "Signal to Noise", "Cages"]
+        },
+        {
+          "artist": "Bill Sienkiewicz",
+          "works": ["Elektra: Assassin", "Stray Toasters", "New Mutants"]
+        },
+        {
+          "artist": "Lorenzo Mattotti",
+          "works": ["Feux", "Le Bruit du givre", "Stigmates"]
+        },
+        {
+          "artist": "Kent Williams",
+          "works": ["Tell Me, Dark", "The Fountain", "Blood: A Tale"]
+        }
+      ]
+    },
+    {
+      "name": "Noir et Blanc Expressionniste",
+      "description": "Style contrasté avec des noirs profonds et des blancs éclatants",
+      "references": [
+        {
+          "artist": "Frank Miller",
+          "works": ["Sin City", "300", "Ronin"]
+        },
+        {
+          "artist": "Alberto Breccia",
+          "works": ["Mort Cinder", "Perramus", "Los Mitos de Cthulhu"]
+        },
+        {
+          "artist": "Jacques Tardi",
+          "works": [
+            "C'était la guerre des tranchées",
+            "Le Cri du peuple",
+            "Adèle Blanc-Sec"
+          ]
+        },
+        {
+          "artist": "José Muñoz",
+          "works": ["Alack Sinner", "Le Bar à Joe"]
+        }
+      ]
+    },
+    {
+      "name": "Style Numérique Moderne",
+      "description": "Style contemporain utilisant les techniques digitales",
+      "references": [
+        {
+          "artist": "Bastien Vivès",
+          "works": ["Polina", "Le Goût du chlore", "Une sœur"]
+        },
+        {
+          "artist": "Bengal",
+          "works": ["Naja", "Luminae", "Death or Glory"]
+        },
+        {
+          "artist": "Claire Wendling",
+          "works": ["Les Lumières de l'Amalou", "Desk", "Iguana Bay"]
+        },
+        {
+          "artist": "Boulet",
+          "works": ["Notes", "La Page Blanche"]
+        }
+      ]
+    },
+    {
+      "name": "Post-Apocalyptique",
+      "description": "Style rugueux avec des atmosphères sombres et des environnements dévastés",
+      "references": [
+        {
+          "artist": "Enki Bilal",
+          "works": ["La Trilogie Nikopol", "Animal'z", "Bug"]
+        },
+        {
+          "artist": "Juan Giménez",
+          "works": [
+            "La Caste des Méta-Barons",
+            "Le Quatrième Pouvoir",
+            "Leo Roa"
+          ]
+        },
+        {
+          "artist": "Geof Darrow",
+          "works": ["Hard Boiled", "Shaolin Cowboy", "Big Guy and Rusty"]
+        },
+        {
+          "artist": "Simon Bisley",
+          "works": ["Slaine", "Lobo", "Heavy Metal"]
+        }
+      ]
+    },
+    {
+      "name": "Underground Américain",
+      "description": "Style brut et provocateur avec une approche contre-culturelle",
+      "references": [
+        {
+          "artist": "Robert Crumb",
+          "works": ["Zap Comix", "Fritz the Cat", "Mr. Natural"]
+        },
+        {
+          "artist": "Spain Rodriguez",
+          "works": ["Trashman", "Mean Bitch Thrills"]
+        },
+        {
+          "artist": "Gilbert Shelton",
+          "works": ["The Fabulous Furry Freak Brothers", "Wonder Wart-Hog"]
+        }
+      ]
+    },
+    {
+      "name": "Nouveau Roman Graphique",
+      "description": "Style intimiste avec une approche narrative personnelle",
+      "references": [
+        {
+          "artist": "Chris Ware",
+          "works": ["Jimmy Corrigan", "Building Stories", "Rusty Brown"]
+        },
+        {
+          "artist": "Daniel Clowes",
+          "works": ["Ghost World", "David Boring", "Patience"]
+        },
+        {
+          "artist": "Adrian Tomine",
+          "works": ["Killing and Dying", "Shortcomings", "Optic Nerve"]
+        }
+      ]
+    }
+  ]
+}

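No code in this commit consumes the styles file yet. One plausible use is picking a style reference to prepend to image prompts; a sketch (the path and the random-selection policy are assumptions):

import json
import random
from pathlib import Path

def pick_style(path: str = "server/core/styles/comic_styles.json") -> str:
    data = json.loads(Path(path).read_text(encoding="utf-8"))
    style = random.choice(data["styles"])
    ref = random.choice(style["references"])
    return f"{style['name']}, in the style of {ref['artist']}"

print(pick_style())  # e.g. "Manga Seinen, in the style of Naoki Urasawa"
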
server/scripts/test_game.py
CHANGED
@@ -122,10 +122,10 @@ async def play_game(show_context: bool = False):
         # Get player choice
         while True:
             try:
-                last_choice = int(input("\n👉 Your choice (1-…
-                if 1 <= last_choice <= …
+                last_choice = int(input(f"\n👉 Your choice (1-{len(response.choices)}): "))
+                if 1 <= last_choice <= len(response.choices):
                     break
-                print("❌ Invalid choice. Please choose 1…
+                print(f"❌ Invalid choice. Please choose between 1 and {len(response.choices)}.")
             except ValueError:
                 print("❌ Please enter a number.")

server/server.py
CHANGED
@@ -33,7 +33,8 @@ app.add_middleware(
         "http://localhost:5173",
         f"http://localhost:{API_PORT}",
         "https://huggingface.co",
-        "https://*.hf.space"
+        "https://*.hf.space",
+        "https://mistral-ai-game-jam-dont-lookup.hf.space"
     ],
     allow_credentials=True,
     allow_methods=["*"],

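One caveat: Starlette's CORSMiddleware compares allow_origins entries as exact strings (the lone "*" aside), so "https://*.hf.space" never matches anything; the explicitly added Space origin is what actually unblocks the deployed client. If wildcard subdomains are the goal, allow_origin_regex is the supported mechanism; a sketch, not part of the commit:

app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:5173", "https://huggingface.co"],
    # Matches every *.hf.space subdomain, including the Space listed above
    allow_origin_regex=r"https://.*\.hf\.space",
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
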
server/services/mistral_client.py
CHANGED
@@ -1,8 +1,13 @@
 import asyncio
+import json
+from typing import TypeVar, Type, Optional, Callable
+from pydantic import BaseModel
 from langchain_mistralai.chat_models import ChatMistralAI
 from langchain.schema import SystemMessage, HumanMessage
 from langchain.schema.messages import BaseMessage
 
+T = TypeVar('T', bound=BaseModel)
+
 # Available Mistral models:
 # - mistral-tiny : Fastest, cheapest, good for testing
 # - mistral-small : Good balance of speed and quality
@@ -36,6 +41,7 @@ class MistralClient:
         # To manage the rate limit
         self.last_call_time = 0
         self.min_delay = 1  # at least 1 second between calls
+        self.max_retries = 3
 
     async def _wait_for_rate_limit(self):
         """Wait as long as needed to respect the rate limit."""
@@ -46,29 +52,80 @@ class MistralClient:
             await asyncio.sleep(self.min_delay - time_since_last_call)
 
         self.last_call_time = asyncio.get_event_loop().time()
+
+    async def _generate_with_retry(
+        self,
+        messages: list[BaseMessage],
+        response_model: Optional[Type[T]] = None,
+        custom_parser: Optional[Callable[[str], T]] = None,
+        error_feedback: str = None
+    ) -> T | str:
+        """
+        Generate a response with retries and optional structured parsing.
+
+        Args:
+            messages: List of messages for the model
+            response_model: Pydantic class used to parse the response
+            custom_parser: Custom parsing function
+            error_feedback: Error feedback appended to the prompt on retry
+        """
+        retry_count = 0
+        last_error = None
+
+        while retry_count < self.max_retries:
+            try:
+                # Append the error feedback if present
+                current_messages = messages.copy()
+                if error_feedback and retry_count > 0:
+                    current_messages.append(HumanMessage(content=f"Previous error: {error_feedback}. Please try again."))
+
+                # Generate the response
+                await self._wait_for_rate_limit()
+                response = await self.model.ainvoke(current_messages)
+                content = response.content
+
+                # If no parsing is required, return the raw content
+                if not response_model and not custom_parser:
+                    return content
+
+                # Parse the response
+                if custom_parser:
+                    return custom_parser(content)
+
+                # Try to parse with the Pydantic model
+                try:
+                    data = json.loads(content)
+                    return response_model(**data)
+                except json.JSONDecodeError as e:
+                    last_error = f"Invalid JSON format: {str(e)}"
+                    raise ValueError(last_error)
+                except Exception as e:
+                    last_error = str(e)
+                    raise ValueError(last_error)
+
+            except Exception as e:
+                print(f"Error on attempt {retry_count + 1}/{self.max_retries}: {str(e)}")
+                retry_count += 1
+                if retry_count < self.max_retries:
+                    await asyncio.sleep(2 * retry_count)
+                    continue
+                raise Exception(f"Failed after {self.max_retries} attempts. Last error: {last_error or str(e)}")
 
-    async def generate_story(self, messages: list[BaseMessage]) -> str:
-        """Generate a response from a list of messages."""
-        try:
-            await self._wait_for_rate_limit()
-            response = await self.model.ainvoke(messages)
-            return response.content
-        except Exception as e:
-            print(f"Error in Mistral API call: {str(e)}")
-            raise
+    async def generate(self, messages: list[BaseMessage], response_model: Optional[Type[T]] = None, custom_parser: Optional[Callable[[str], T]] = None) -> T | str:
+        """Generate a response from a list of messages, with optional parsing."""
+        return await self._generate_with_retry(messages, response_model, custom_parser)
 
     async def transform_prompt(self, story_text: str, art_prompt: str) -> str:
         """Transform a story text into an artistic prompt."""
+        messages = [{
+            "role": "system",
+            "content": art_prompt
+        }, {
+            "role": "user",
+            "content": f"Transform this story text into a comic panel description:\n{story_text}"
+        }]
         try:
-            await self._wait_for_rate_limit()
-            response = await self.model.ainvoke([{
-                "role": "system",
-                "content": art_prompt
-            }, {
-                "role": "user",
-                "content": f"Transform this story text into a comic panel description:\n{story_text}"
-            }])
-            return response.content
+            return await self._generate_with_retry(messages)
         except Exception as e:
             print(f"Error transforming prompt: {str(e)}")
             return story_text

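Both entry points funnel into the same retry loop; the only difference is how the reply is parsed. A hedged usage sketch (the schema and prompt are illustrative; client construction assumes API-key configuration elsewhere):

import asyncio
from pydantic import BaseModel
from langchain.schema import HumanMessage, SystemMessage
from services.mistral_client import MistralClient

class Summary(BaseModel):
    title: str
    text: str

async def demo():
    client = MistralClient()  # assumes credentials are configured elsewhere
    messages = [
        SystemMessage(content='Reply only as JSON: {"title": "...", "text": "..."}'),
        HumanMessage(content="Summarize: Sarah explores a city of gears."),
    ]
    summary = await client.generate(messages, response_model=Summary)  # validated, retried
    raw = await client.generate(messages)                              # raw string reply
    print(summary.title, raw[:40])

asyncio.run(demo())
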