Og2 committed (verified)
Commit dfafcae · 1 Parent(s): 517e621

Create app.py

Files changed (1)
  1. app.py +281 -0
app.py ADDED
@@ -0,0 +1,281 @@
#curl -H "Accept: application/json" https://Og2-wstest.hf.space/
#curl -H "Accept: application/json" https://Og2-wstest.hf.space/hello/
#curl -X POST https://Og2-wstest.hf.space/muws2/ -H "Content-Type: application/json" -d "{\"text\": \"Ceci est un texte exemple.\"}"
#curl -X POST "https://Og2-wstest.hf.space/upload-video/" -H "Content-Type: multipart/form-data" -F "file=@E:\nosave\MyDocs\AndroidStudio\PythonProjects\SportHobbyStats\Foosball\VideoAnnotation\MoviNet2\train\Block_1-2.1/CNFT2Toulouse_Block_1729.avi"
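# Illustrative call for the chunked /upload-dropzone/ endpoint defined below.
# The chunk file and field values are placeholders, not from the original commit:
#curl -X POST "https://Og2-wstest.hf.space/upload-dropzone/" -F "file=@chunk0.bin" -F "chunkIndex=0" -F "totalChunks=1" -F "fileName=video.avi" -F "directory=videos"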

from fastapi import FastAPI, File, Form, UploadFile, HTTPException
from pathlib import Path
import os
from pydantic import BaseModel
import json

app = FastAPI()

# Model for receiving a text payload
class TextInput(BaseModel):
    text: str


@app.post("/muws2/")
def process_text(input_data: TextInput):
    # Read the text from the request body
    input_text = input_data.text

    # Build a JSON dictionary echoing the text back
    output = {
        "text": input_text
    }

    return output

@app.get("/hello/")
def greet_hello():
    return {"msg": "Hello World!"}


@app.get("/")
def greet_root():  # renamed from greet_hello to avoid redefining the handler above
    return {"msg": "Ok!"}

@app.post("/upload-video/")
async def upload_video(file: UploadFile = File(...)):
    # Read the file content
    content = await file.read()
    # The video can be saved or processed here
    with open(f"uploaded_{file.filename}", "wb") as f:
        f.write(content)

    return {"filename": file.filename, "content_type": file.content_type}


UPLOAD_DIR = "uploads"
os.makedirs(UPLOAD_DIR, exist_ok=True)


@app.post("/upload-dropzone/")
async def upload_file(
    file: UploadFile = File(...),
    chunkIndex: int = Form(...),
    totalChunks: int = Form(...),
    fileName: str = Form(...),
    directory: str = Form(...),
):
    try:
        print(f"Received: chunkIndex={chunkIndex}, totalChunks={totalChunks}, fileName={fileName}, directory={directory}")
        # Create the directory if it doesn't exist
        target_dir = Path(UPLOAD_DIR) / directory
        target_dir.mkdir(parents=True, exist_ok=True)

        # Save the chunk
        chunk_path = target_dir / f"{fileName}.part{chunkIndex}"
        with open(chunk_path, "wb") as f:
            f.write(await file.read())

        # If it's the last chunk, reconstruct the file
        if chunkIndex + 1 == totalChunks:
            final_file_path = target_dir / fileName
            with open(final_file_path, "wb") as final_file:
                for i in range(totalChunks):
                    part_path = target_dir / f"{fileName}.part{i}"
                    with open(part_path, "rb") as part_file:
                        final_file.write(part_file.read())
                    os.remove(part_path)  # Remove the chunk after merging
            print(f"Final file path: {final_file_path}")

            return {
                "status": "success",
                "message": "File reconstructed successfully.",
                "file_path": str(final_file_path)
            }

        return {"status": "success", "message": "Chunk uploaded successfully."}

    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Upload failed: {str(e)}")


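# --- Illustrative client-side sketch (not used by the app) -----------------
# Shows how a client could split a file into chunks and send them to the
# /upload-dropzone/ endpoint above. The chunk size, base URL, directory name
# and the use of the `requests` package are assumptions for illustration only.
def example_chunked_upload(path, base_url="http://localhost:8000", directory="videos", chunk_size=1024 * 1024):
    import math
    import requests  # assumed to be available; not a dependency of this app

    file_name = os.path.basename(path)
    total_chunks = max(1, math.ceil(os.path.getsize(path) / chunk_size))
    with open(path, "rb") as f:
        for index in range(total_chunks):
            chunk = f.read(chunk_size)
            response = requests.post(
                f"{base_url}/upload-dropzone/",
                files={"file": (file_name, chunk)},
                data={
                    "chunkIndex": index,
                    "totalChunks": total_chunks,
                    "fileName": file_name,
                    "directory": directory,
                },
            )
            response.raise_for_status()
    # The last response carries the reconstructed file path
    return response.json()

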
import tensorflow as tf
import numpy as np
import cv2

# Available backend options are: "jax", "torch", "tensorflow".
# The backend must be selected before keras is imported, otherwise the setting has no effect.
os.environ["KERAS_BACKEND"] = "tensorflow"

import keras
from keras.saving import register_keras_serializable
from keras import layers
from huggingface_hub import hf_hub_download
from keras.applications.densenet import DenseNet121
#from tensorflow_docs.vis import embed


# Keras model parameters
MAX_SEQ_LENGTH = 8
NUM_FEATURES = 1024
IMG_SIZE = 128

#center_crop_layer = layers.CenterCrop(IMG_SIZE, IMG_SIZE)
# Resize instead of center-cropping
center_crop_layer = layers.Resizing(IMG_SIZE, IMG_SIZE)


def crop_center(frame):
    cropped = center_crop_layer(frame[None, ...])
    cropped = keras.ops.convert_to_numpy(cropped)
    cropped = keras.ops.squeeze(cropped)
    return cropped


# Following method is modified from this tutorial:
# https://www.tensorflow.org/hub/tutorials/action_recognition_with_tf_hub
def load_video(path, max_frames=0, offload_to_cpu=False):
    cap = cv2.VideoCapture(path)
    frames = []
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            frame = frame[:, :, [2, 1, 0]]  # BGR -> RGB
            frame = crop_center(frame)
            if offload_to_cpu and keras.backend.backend() == "torch":
                frame = frame.to("cpu")
            frames.append(frame)

            if len(frames) == max_frames:
                break
    finally:
        cap.release()
    if offload_to_cpu and keras.backend.backend() == "torch":
        return np.array([frame.to("cpu").numpy() for frame in frames])
    return np.array(frames)

def build_feature_extractor():
    feature_extractor = DenseNet121(
        weights="imagenet",
        include_top=False,
        pooling="avg",
        input_shape=(IMG_SIZE, IMG_SIZE, 3),
    )
    preprocess_input = keras.applications.densenet.preprocess_input

    inputs = keras.Input((IMG_SIZE, IMG_SIZE, 3))
    preprocessed = preprocess_input(inputs)

    outputs = feature_extractor(preprocessed)
    return keras.Model(inputs, outputs, name="feature_extractor")


feature_extractor = build_feature_extractor()

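# Illustrative shape check (comment only): with include_top=False and pooling="avg",
# DenseNet121 maps each 128x128 RGB frame to a 1024-dimensional vector, which is why
# NUM_FEATURES is 1024 above. For example:
#   feature_extractor.predict(np.zeros((1, IMG_SIZE, IMG_SIZE, 3))).shape == (1, NUM_FEATURES)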

@keras.saving.register_keras_serializable()
class PositionalEmbedding(layers.Layer):
    def __init__(self, sequence_length, output_dim, **kwargs):
        super().__init__(**kwargs)
        self.position_embeddings = layers.Embedding(
            input_dim=sequence_length, output_dim=output_dim
        )
        self.sequence_length = sequence_length
        self.output_dim = output_dim

    def build(self, input_shape):
        self.position_embeddings.build(input_shape)

    def call(self, inputs):
        # The inputs are of shape: `(batch_size, frames, num_features)`
        inputs = keras.ops.cast(inputs, self.compute_dtype)
        length = keras.ops.shape(inputs)[1]
        positions = keras.ops.arange(start=0, stop=length, step=1)
        embedded_positions = self.position_embeddings(positions)
        return inputs + embedded_positions

@keras.saving.register_keras_serializable()
class TransformerEncoder(layers.Layer):
    def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
        super().__init__(**kwargs)
        self.embed_dim = embed_dim
        self.dense_dim = dense_dim
        self.num_heads = num_heads
        self.attention = layers.MultiHeadAttention(
            num_heads=num_heads, key_dim=embed_dim, dropout=0.3
        )
        self.dense_proj = keras.Sequential(
            [
                layers.Dense(dense_dim, activation=keras.activations.gelu),
                layers.Dense(embed_dim),
            ]
        )
        self.layernorm_1 = layers.LayerNormalization()
        self.layernorm_2 = layers.LayerNormalization()

    def call(self, inputs, mask=None):
        attention_output = self.attention(inputs, inputs, attention_mask=mask)
        proj_input = self.layernorm_1(inputs + attention_output)
        proj_output = self.dense_proj(proj_input)
        return self.layernorm_2(proj_input + proj_output)


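# --- Illustrative only (not used at runtime) --------------------------------
# One plausible way the two custom layers above fit together into a sequence
# classifier, following the Keras video-transformers recipe this code is based
# on. The actual architecture is whatever was saved in hf://Og2/videoclassif;
# dense_dim, num_heads and the pooling/dropout choices below are assumptions.
def build_example_classifier(num_classes):
    inputs = keras.Input(shape=(MAX_SEQ_LENGTH, NUM_FEATURES))
    x = PositionalEmbedding(MAX_SEQ_LENGTH, NUM_FEATURES)(inputs)
    x = TransformerEncoder(embed_dim=NUM_FEATURES, dense_dim=4, num_heads=1)(x)
    x = layers.GlobalMaxPooling1D()(x)
    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(num_classes, activation="softmax")(x)
    return keras.Model(inputs, outputs)

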
#model = keras.saving.load_model("hf://Og2/videoclassif")
model = keras.saving.load_model("hf://Og2/videoclassif", custom_objects={'PositionalEmbedding': PositionalEmbedding, 'TransformerEncoder': TransformerEncoder})

# Hugging Face model repo and the file to read from it
model_repo = "Og2/videoclassif"  # replace with your own model if needed
file_name = "labels.txt"  # the file to download

# Download the file from the Hugging Face Hub
labels_file_path = hf_hub_download(repo_id=model_repo, filename=file_name)

# List of the model's class labels
#input_file = "hf://Og2/videoclassif/labels.txt"
# Read the file and build the list
with open(labels_file_path, "r") as file:
    class_labels = [line.strip() for line in file]
#print("Class labels rebuilt from the file:")
#print(class_labels)


# test on video from val dataset
def prepare_single_video(frames):
    frame_features = np.zeros(shape=(1, MAX_SEQ_LENGTH, NUM_FEATURES), dtype="float32")

    # Pad shorter videos.
    if len(frames) < MAX_SEQ_LENGTH:
        diff = MAX_SEQ_LENGTH - len(frames)
        padding = np.zeros((diff, IMG_SIZE, IMG_SIZE, 3))
        frames = np.concatenate([frames, padding])  # np.concatenate expects a sequence of arrays

    frames = frames[None, ...]

    # Extract features from the frames of the current video.
    for i, batch in enumerate(frames):
        video_length = batch.shape[0]
        length = min(MAX_SEQ_LENGTH, video_length)
        for j in range(length):
            if np.mean(batch[j, :]) > 0.0:
                frame_features[i, j, :] = feature_extractor.predict(batch[None, j, :])
            else:
                frame_features[i, j, :] = 0.0

    return frame_features


async def predict_action(video):
    print("##### predict_action started #####")
    frames = load_video(video, offload_to_cpu=True)
    frame_features = prepare_single_video(frames)
    probabilities = model.predict(frame_features)[0]
    # Get the top 5 classes
    top_5_indices = np.argsort(probabilities)[::-1][:5]
    results = {class_labels[i]: float(probabilities[i]) for i in top_5_indices}
    #return results

    # Save the JSON to a temporary file
    output_file = "result.json"
    with open(output_file, "w") as f:
        json.dump(results, f)

    return results
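

# --- Illustrative wiring (not part of this commit) ---------------------------
# predict_action is defined above but no route calls it yet. One possible way
# to expose it (an assumption, sketched as a comment so behaviour is unchanged):
#
# @app.post("/predict/")
# async def predict(file: UploadFile = File(...)):
#     temp_path = f"uploaded_{file.filename}"
#     with open(temp_path, "wb") as f:
#         f.write(await file.read())
#     return await predict_action(temp_path)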