ChandimaPrabath committed on
Commit
20b65a4
·
1 Parent(s): 4afe0d2

major update v0.2

Browse files
Files changed (4) hide show
  1. Instance.py +376 -0
  2. TODO.md +5 -0
  3. app.backup.py +368 -0
  4. app.py +32 -168
Instance.py ADDED
@@ -0,0 +1,376 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import requests
3
+ import json
4
+ import urllib.request
5
+ import time
6
+ from threading import Thread
7
+ from requests.exceptions import RequestException
8
+ from tqdm import tqdm
9
+ from indexer import indexer
10
+ import re
11
+
12
+ CACHE_DIR = os.getenv("CACHE_DIR")
13
+
14
+ download_progress = {}
15
+
16
class Instance:
    """A cache node for a media repository.

    On construction it prepares a cache directory, builds/loads the content
    index, and starts a background metadata-prefetch thread. Static helpers
    download films/episodes into the cache and track progress in the
    module-level `download_progress` dict.
    """

    def __init__(self, id, url, cache_dir, index_file, token, repo):
        """Create the instance, prepare the cache, index and load the repo.

        Args:
            id: Unique identifier for this instance.
            url: Public URL of this instance.
            cache_dir: Directory used for cached media and store files.
            index_file: Path of the JSON index produced by `indexer()`.
            token: Authorization token for the media repo.
            repo: Repository identifier.

        Raises:
            FileNotFoundError: If `index_file` does not exist after indexing.
        """
        self.id = id
        self.url = url
        self.CACHE_DIR = cache_dir
        self.INDEX_FILE = index_file
        self.TOKEN = token
        self.REPO = repo
        self.FILM_STORE_JSON_PATH = os.path.join(cache_dir, "film_store.json")
        self.TV_STORE_JSON_PATH = os.path.join(cache_dir, "tv_store.json")
        self.download_threads = {}
        self.file_structure = None

        # Ensure the cache directory and both (initially empty) stores exist.
        os.makedirs(self.CACHE_DIR, exist_ok=True)
        for path in (self.FILM_STORE_JSON_PATH, self.TV_STORE_JSON_PATH):
            if not os.path.exists(path):
                with open(path, 'w') as json_file:
                    json.dump({}, json_file)

        # Build the index, then load it into memory.
        indexer()
        if not os.path.exists(self.INDEX_FILE):
            raise FileNotFoundError(f"{self.INDEX_FILE} not found. Please make sure the file exists.")
        with open(self.INDEX_FILE, 'r') as f:
            self.file_structure = json.load(f)

        # Prefetch metadata in a background daemon thread.
        thread = Thread(target=self.start_prefetching)
        thread.daemon = True
        thread.start()

    def compile_report(self):
        """Return {id, film_store, tv_store, cache_size} for this instance.

        Fix: read the instance-configured store paths instead of the
        module-level CACHE_DIR env value, so a custom cache_dir is honored.
        """
        return {
            "id": self.id,
            "film_store": self.read_json(self.FILM_STORE_JSON_PATH),
            "tv_store": self.read_json(self.TV_STORE_JSON_PATH),
            "cache_size": self.get_cache_size(),
        }

    def get_cache_size(self):
        """Return this instance's total cache size as {'cache_size': 'X.XX GB'}."""
        total_size = 0
        # Fix: walk the instance's own cache dir, not the module-level one.
        for dirpath, _dirnames, filenames in os.walk(self.CACHE_DIR):
            for name in filenames:
                total_size += os.path.getsize(os.path.join(dirpath, name))
        return {"cache_size": f"{total_size / (1024 * 1024 * 1024):.2f} GB"}

    @staticmethod
    def read_json(file_path):
        """Load JSON from `file_path`; return {} when the file is missing."""
        if os.path.exists(file_path):
            with open(file_path, 'r') as json_file:
                return json.load(json_file)
        return {}

    @staticmethod
    def get_system_proxies():
        """
        Retrieves the system's HTTP and HTTPS proxies.

        Returns:
            dict: A dictionary containing the proxies, or {} on error.
        """
        try:
            proxies = urllib.request.getproxies()
            print("System proxies:", proxies)
            return {
                "http": proxies.get("http"),
                # Fix: use the https entry (the original reused the http
                # one); fall back to the http proxy when https is unset.
                "https": proxies.get("https") or proxies.get("http"),
            }
        except Exception as e:
            print(f"Error getting system proxies: {e}")
            return {}

    @staticmethod
    def _download_to_cache(file_url, token, cache_path, proxies, progress_id, update_store, chunk_size):
        """Stream `file_url` into `cache_path`, tracking progress under `progress_id`.

        Shared worker for `download_film` / `download_episode`.
        `update_store` is a zero-argument callable invoked after a
        successful download to record the file in the relevant store.
        """
        print(f"Downloading file from URL: {file_url} to {cache_path} with proxies: {proxies}")
        headers = {'Authorization': f'Bearer {token}'}
        # Fix: create the progress entry up-front so the except/finally
        # paths cannot KeyError when the request itself fails.
        download_progress[progress_id] = {"total": 0, "downloaded": 0, "status": "Downloading", "start_time": time.time()}
        try:
            response = requests.get(file_url, headers=headers, proxies=proxies, stream=True)
            response.raise_for_status()

            total_size = int(response.headers.get('content-length', 0))
            download_progress[progress_id]["total"] = total_size

            os.makedirs(os.path.dirname(cache_path), exist_ok=True)
            with open(cache_path, 'wb') as file, tqdm(total=total_size, unit='B', unit_scale=True, desc=cache_path) as pbar:
                for data in response.iter_content(chunk_size=chunk_size):
                    file.write(data)
                    pbar.update(len(data))
                    download_progress[progress_id]["downloaded"] += len(data)

            print(f'File cached to {cache_path} successfully.')
            update_store()
            download_progress[progress_id]["status"] = "Completed"
        except RequestException as e:
            print(f"Error downloading file: {e}")
            download_progress[progress_id]["status"] = "Failed"
        except IOError as e:
            print(f"Error writing file {cache_path}: {e}")
            download_progress[progress_id]["status"] = "Failed"
        finally:
            if download_progress[progress_id]["status"] != "Downloading":
                download_progress[progress_id]["end_time"] = time.time()

    @staticmethod
    def download_film(file_url, token, cache_path, proxies, film_id, title, chunk_size=100 * 1024 * 1024):
        """
        Downloads a film file and records it in the film store.

        Args:
            file_url (str): The URL of the file to download.
            token (str): The authorization token for the request.
            cache_path (str): The path to save the downloaded file.
            proxies (dict): Proxies for the request.
            film_id (str): Unique identifier for the film download.
            title (str): The title of the film.
            chunk_size (int): Size of each chunk to download.
        """
        Instance._download_to_cache(
            file_url, token, cache_path, proxies, film_id,
            lambda: Instance.update_film_store_json(title, cache_path),
            chunk_size,
        )

    @staticmethod
    def download_episode(file_url, token, cache_path, proxies, episode_id, title, chunk_size=100 * 1024 * 1024):
        """
        Downloads an episode file and records it in the TV store.

        Args:
            file_url (str): The URL of the file to download.
            token (str): The authorization token for the request.
            cache_path (str): The path to save the downloaded file.
            proxies (dict): Proxies for the request.
            episode_id (str): Unique identifier for the episode download.
            title (str): The title of the TV show.
            chunk_size (int): Size of each chunk to download.
        """
        Instance._download_to_cache(
            file_url, token, cache_path, proxies, episode_id,
            lambda: Instance.update_tv_store_json(title, cache_path),
            chunk_size,
        )

    @staticmethod
    def get_download_progress(id):
        """
        Gets the download progress for a specific download.

        Args:
            id (str): The unique identifier for the download.

        Returns:
            dict: total, downloaded, progress (%), status, and eta in
            seconds (None when unknown, 0 when completed). Unknown ids
            yield a "Not Found" record.
        """
        if id in download_progress:
            entry = download_progress[id]
            total = entry["total"]
            downloaded = entry["downloaded"]
            status = entry.get("status", "In Progress")
            progress = (downloaded / total) * 100 if total > 0 else 0

            eta = None
            if status == "Downloading" and downloaded > 0:
                elapsed_time = time.time() - entry["start_time"]
                estimated_total_time = elapsed_time * (total / downloaded)
                eta = estimated_total_time - elapsed_time
            elif status == "Completed":
                eta = 0

            return {"total": total, "downloaded": downloaded, "progress": progress, "status": status, "eta": eta}
        return {"total": 0, "downloaded": 0, "progress": 0, "status": "Not Found", "eta": None}

    @staticmethod
    def update_film_store_json(title, cache_path):
        """
        Updates the film store JSON with the new file.

        NOTE: static callers have no instance, so this uses the module-level
        CACHE_DIR (env) rather than a per-instance cache dir.

        Args:
            title (str): The title of the film.
            cache_path (str): The local path where the file is saved.
        """
        FILM_STORE_JSON_PATH = os.path.join(CACHE_DIR, "film_store.json")

        film_store_data = {}
        if os.path.exists(FILM_STORE_JSON_PATH):
            with open(FILM_STORE_JSON_PATH, 'r') as json_file:
                film_store_data = json.load(json_file)

        film_store_data[title] = cache_path

        with open(FILM_STORE_JSON_PATH, 'w') as json_file:
            json.dump(film_store_data, json_file, indent=2)
        print(f'Film store updated with {title}.')

    @staticmethod
    def update_tv_store_json(title, cache_path):
        """
        Updates the TV store JSON, organizing by title, season, and episode.

        Args:
            title (str): The title of the TV show.
            cache_path (str): The local path where the file is saved.
        """
        TV_STORE_JSON_PATH = os.path.join(CACHE_DIR, "tv_store.json")

        tv_store_data = {}
        if os.path.exists(TV_STORE_JSON_PATH):
            with open(TV_STORE_JSON_PATH, 'r') as json_file:
                tv_store_data = json.load(json_file)

        # Derive season and episode names from the cached file's path,
        # e.g. '.../Season 1/<episode file>.mkv'.
        season_part = os.path.basename(os.path.dirname(cache_path))
        episode_part = os.path.basename(cache_path)

        tv_store_data.setdefault(title, {}).setdefault(season_part, [])
        tv_store_data[title][season_part].append(episode_part)

        with open(TV_STORE_JSON_PATH, 'w') as json_file:
            json.dump(tv_store_data, json_file, indent=2)
        print(f'TV store updated with {title}, {season_part}, {episode_part}.')

    def load_json(self, file_path):
        """Load JSON data from a file."""
        with open(file_path, 'r') as file:
            return json.load(file)

    def find_movie_path(self, title):
        """Find the path of the movie in the JSON data based on the title."""
        needle = title.lower()
        for directory in self.file_structure:
            if directory['type'] == 'directory' and directory['path'] == 'films':
                for sub_directory in directory['contents']:
                    if sub_directory['type'] == 'directory':
                        for item in sub_directory['contents']:
                            if item['type'] == 'file' and needle in item['path'].lower():
                                return item['path']
        return None

    def find_tv_path(self, title):
        """Find the path of the TV show in the JSON data based on the title."""
        needle = title.lower()
        for directory in self.file_structure:
            if directory['type'] == 'directory' and directory['path'] == 'tv':
                for sub_directory in directory['contents']:
                    if sub_directory['type'] == 'directory' and needle in sub_directory['path'].lower():
                        return sub_directory['path']
        return None

    def get_tv_structure(self, title):
        """Return the indexed directory entry of the TV show matching `title`."""
        needle = title.lower()
        for directory in self.file_structure:
            if directory['type'] == 'directory' and directory['path'] == 'tv':
                for sub_directory in directory['contents']:
                    if sub_directory['type'] == 'directory' and needle in sub_directory['path'].lower():
                        return sub_directory
        return None

    def get_film_id(self, title):
        """Generate a film ID based on the title."""
        return title.replace(" ", "_").lower()

    def prefetch_metadata(self):
        """Prefetch metadata for all items in the file structure.

        NOTE(review): `fetch_and_cache_json` is referenced but never defined
        on this class nor imported in this module, so the original always
        raised AttributeError inside the prefetch thread. Skip gracefully
        until the helper is provided (presumably from the tvdb module —
        confirm).
        """
        fetch = getattr(self, "fetch_and_cache_json", None)
        if fetch is None:
            print("prefetch_metadata: fetch_and_cache_json is unavailable; skipping prefetch.")
            return
        for item in self.file_structure:
            if 'contents' not in item:
                continue
            media_type = 'series' if item['path'].startswith('tv') else 'movie'
            for sub_item in item['contents']:
                original_title = sub_item['path'].split('/')[-1]
                title = original_title
                year = None

                # Prefer a parenthesized year, e.g. "Title (2020)".
                match = re.search(r'\((\d{4})\)', original_title)
                if match:
                    title = original_title[:match.start()].strip()
                    year = int(match.group(1))
                else:
                    # Fall back to a bare trailing 4-digit year.
                    parts = original_title.rsplit(' ', 1)
                    if len(parts) > 1 and parts[-1].isdigit() and len(parts[-1]) == 4:
                        title = parts[0].strip()
                        year = int(parts[-1])

                fetch(original_title, title, media_type, year)

    def bytes_to_human_readable(self, num, suffix="B"):
        """Render a byte count as a human-readable string (e.g. '1.5 KB')."""
        for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
            if abs(num) < 1024.0:
                return f"{num:3.1f} {unit}{suffix}"
            num /= 1024.0
        return f"{num:.1f} Y{suffix}"

    def encode_episodeid(self, title, season, episode):
        """Build the progress-tracking id for an episode."""
        return f"{title}_{season}_{episode}"

    def get_all_tv_shows(self):
        """Get all TV shows from the indexed cache structure JSON file."""
        tv_shows = {}
        for directory in self.file_structure:
            if directory['type'] == 'directory' and directory['path'] == 'tv':
                for sub_directory in directory['contents']:
                    if sub_directory['type'] == 'directory':
                        show_title = sub_directory['path'].split('/')[-1]
                        tv_shows[show_title] = []
                        for season_directory in sub_directory['contents']:
                            if season_directory['type'] == 'directory':
                                season = season_directory['path'].split('/')[-1]
                                for episode in season_directory['contents']:
                                    if episode['type'] == 'file':
                                        tv_shows[show_title].append({
                                            "season": season,
                                            "episode": episode['path'].split('/')[-1],
                                            "path": episode['path']
                                        })
        return tv_shows

    def get_all_films(self):
        """Get all films from the indexed cache structure JSON file."""
        films = []
        for directory in self.file_structure:
            if directory['type'] == 'directory' and directory['path'] == 'films':
                for sub_directory in directory['contents']:
                    if sub_directory['type'] == 'directory':
                        films.append(sub_directory['path'])
        return films

    def start_prefetching(self):
        """Thread entry point: run the metadata prefetch."""
        self.prefetch_metadata()
TODO.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ ## TODO
2
+ * Create Instance class
3
+ * add a method to register the instance to Load Balancer on ```[POST]<load_balancer_url>/api/register``` route
4
+ * add `[GET] /api/get/report` route to return a report consisting of film_store.json, tv_store.json, and cache_size
5
+ * add two api routes to delete a certain film or episode
app.backup.py ADDED
@@ -0,0 +1,368 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from flask import Flask, jsonify, request, send_from_directory
2
+ from flask_cors import CORS
3
+ import os
4
+ import json
5
+ import threading
6
+ import urllib.parse
7
+ from hf_scrapper import download_film, download_episode, get_system_proxies, get_download_progress
8
+ from indexer import indexer
9
+ from tvdb import fetch_and_cache_json
10
+ import re
11
+
12
app = Flask(__name__)
CORS(app)

# Constants and configuration (all supplied via environment variables).
CACHE_DIR = os.getenv("CACHE_DIR")
INDEX_FILE = os.getenv("INDEX_FILE")
TOKEN = os.getenv("TOKEN")
FILM_STORE_JSON_PATH = os.path.join(CACHE_DIR, "film_store.json")
TV_STORE_JSON_PATH = os.path.join(CACHE_DIR, "tv_store.json")
REPO = os.getenv("REPO")
download_threads = {}

# Make sure the cache directory and both store files exist before use.
os.makedirs(CACHE_DIR, exist_ok=True)
for _store_path in (FILM_STORE_JSON_PATH, TV_STORE_JSON_PATH):
    if not os.path.exists(_store_path):
        with open(_store_path, 'w') as json_file:
            json.dump({}, json_file)

# Build the content index, then load it into memory.
indexer()

if not os.path.exists(INDEX_FILE):
    raise FileNotFoundError(f"{INDEX_FILE} not found. Please make sure the file exists.")

with open(INDEX_FILE, 'r') as f:
    file_structure = json.load(f)
42
+
43
+ # Function Definitions
44
+
45
def load_json(file_path):
    """Load JSON data from a file."""
    with open(file_path) as handle:
        return json.load(handle)
49
+
50
def find_movie_path(json_data, title):
    """Find the path of the movie in the JSON data based on the title."""
    needle = title.lower()
    for directory in json_data:
        if directory['type'] != 'directory' or directory['path'] != 'films':
            continue
        for sub_directory in directory['contents']:
            if sub_directory['type'] != 'directory':
                continue
            for item in sub_directory['contents']:
                if item['type'] == 'file' and needle in item['path'].lower():
                    return item['path']
    return None
60
+
61
def find_tv_path(json_data, title):
    """Find the path of the TV show in the JSON data based on the title."""
    needle = title.lower()
    for directory in json_data:
        if directory['type'] != 'directory' or directory['path'] != 'tv':
            continue
        for sub_directory in directory['contents']:
            if sub_directory['type'] == 'directory' and needle in sub_directory['path'].lower():
                return sub_directory['path']
    return None
69
+
70
def get_tv_structure(json_data, title):
    """Return the indexed directory entry of the TV show matching `title`."""
    needle = title.lower()
    for directory in json_data:
        if directory['type'] != 'directory' or directory['path'] != 'tv':
            continue
        for sub_directory in directory['contents']:
            if sub_directory['type'] == 'directory' and needle in sub_directory['path'].lower():
                return sub_directory
    return None
78
+
79
def get_film_id(title):
    """Generate a film ID based on the title."""
    return title.lower().replace(" ", "_")
82
+
83
def prefetch_metadata():
    """Prefetch metadata for all items in the file structure."""
    for top_level in file_structure:
        if 'contents' not in top_level:
            continue
        # The top-level directory decides whether entries are shows or films.
        media_type = 'series' if top_level['path'].startswith('tv') else 'movie'
        for entry in top_level['contents']:
            original_title = entry['path'].split('/')[-1]
            title = original_title
            year = None

            # Prefer a parenthesized year, e.g. "Title (2020)".
            match = re.search(r'\((\d{4})\)', original_title)
            if match:
                title = original_title[:match.start()].strip()
                year = int(match.group(1))
            else:
                # Fall back to a bare trailing 4-digit year.
                head, _, tail = original_title.rpartition(' ')
                if head and tail.isdigit() and len(tail) == 4:
                    title = head.strip()
                    year = int(tail)

            fetch_and_cache_json(original_title, title, media_type, year)
107
+
108
def bytes_to_human_readable(num, suffix="B"):
    """Render a byte count as a human-readable string (e.g. '1.5 KB')."""
    value = num
    for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
        if abs(value) < 1024.0:
            return f"{value:3.1f} {unit}{suffix}"
        value /= 1024.0
    return f"{value:.1f} Y{suffix}"
114
+
115
def encode_episodeid(title, season, episode):
    """Build the progress-tracking id for an episode."""
    return "{}_{}_{}".format(title, season, episode)
117
+
118
def get_all_tv_shows(indexed_cache):
    """Get all TV shows from the indexed cache structure JSON file."""
    tv_shows = {}
    for root in indexed_cache:
        if root['type'] != 'directory' or root['path'] != 'tv':
            continue
        for show_dir in root['contents']:
            if show_dir['type'] != 'directory':
                continue
            show_title = show_dir['path'].split('/')[-1]
            tv_shows[show_title] = []
            for season_dir in show_dir['contents']:
                if season_dir['type'] != 'directory':
                    continue
                season_name = season_dir['path'].split('/')[-1]
                tv_shows[show_title].extend(
                    {
                        "season": season_name,
                        "episode": ep['path'].split('/')[-1],
                        "path": ep['path'],
                    }
                    for ep in season_dir['contents']
                    if ep['type'] == 'file'
                )
    return tv_shows
138
+
139
def get_all_films(indexed_cache):
    """Get all films from the indexed cache structure JSON file."""
    return [
        entry['path']
        for directory in indexed_cache
        if directory['type'] == 'directory' and directory['path'] == 'films'
        for entry in directory['contents']
        if entry['type'] == 'directory'
    ]
148
+
149
def start_prefetching():
    """Entry point for the metadata prefetch thread."""
    prefetch_metadata()
152
+
153
# Kick off metadata prefetching in a background daemon thread.
prefetch_thread = threading.Thread(target=start_prefetching, daemon=True)
prefetch_thread.start()
157
+
158
+ # API Endpoints
159
+
160
@app.route('/api/film', methods=['GET'])
def get_movie_api():
    """Endpoint to get the movie by title."""
    title = request.args.get('title')
    if not title:
        return jsonify({"error": "Title parameter is required"}), 400

    # Serve directly from the cache when this title was downloaded before.
    with open(FILM_STORE_JSON_PATH, 'r') as json_file:
        film_store_data = json.load(json_file)
    cached = film_store_data.get(title)
    if cached and os.path.exists(cached):
        return send_from_directory(os.path.dirname(cached), os.path.basename(cached))

    movie_path = find_movie_path(file_structure, title)
    if not movie_path:
        return jsonify({"error": "Movie not found"}), 404

    cache_path = os.path.join(CACHE_DIR, movie_path)
    file_url = f"https://huggingface.co/{REPO}/resolve/main/{movie_path}"
    proxies = get_system_proxies()
    film_id = get_film_id(title)

    # Kick off a background download unless one is already running.
    worker = download_threads.get(film_id)
    if worker is None or not worker.is_alive():
        worker = threading.Thread(target=download_film, args=(file_url, TOKEN, cache_path, proxies, film_id, title))
        download_threads[film_id] = worker
        worker.start()

    return jsonify({"status": "Download started", "film_id": film_id})
194
+
195
@app.route('/api/tv', methods=['GET'])
def get_tv_show_api():
    """Endpoint to get the TV show by title, season, and episode.

    Serves the episode straight from the cache when present; otherwise
    starts a background download and returns its id.
    """
    title = request.args.get('title')
    season = request.args.get('season')
    episode = request.args.get('episode')

    if not title or not season or not episode:
        return jsonify({"error": "Title, season, and episode parameters are required"}), 400

    tv_path = find_tv_path(file_structure, title)
    if not tv_path:
        return jsonify({"error": "TV show not found"}), 404

    # Locate the episode file inside the indexed structure.
    episode_path = None
    for directory in file_structure:
        if directory['type'] == 'directory' and directory['path'] == 'tv':
            for sub_directory in directory['contents']:
                if sub_directory['type'] == 'directory' and title.lower() in sub_directory['path'].lower():
                    for season_dir in sub_directory['contents']:
                        if season_dir['type'] == 'directory' and season in season_dir['path']:
                            for episode_file in season_dir['contents']:
                                if episode_file['type'] == 'file' and episode in episode_file['path']:
                                    episode_path = episode_file['path']
                                    break

    if not episode_path:
        return jsonify({"error": "Episode not found"}), 404

    cache_path = os.path.join(CACHE_DIR, episode_path)

    # Bug fix: tv_store.json maps title -> season -> [episode filenames]
    # (a list), so the original dict-style lookup
    # `tv_store_data[title][season][ep]` raised TypeError on every cache
    # hit. Serve straight from disk when the cached file already exists.
    if os.path.exists(cache_path):
        return send_from_directory(os.path.dirname(cache_path), os.path.basename(cache_path))

    file_url = f"https://huggingface.co/{REPO}/resolve/main/{episode_path}"
    proxies = get_system_proxies()
    episode_id = encode_episodeid(title, season, episode)

    # Start the download in a separate thread if not already downloading.
    if episode_id not in download_threads or not download_threads[episode_id].is_alive():
        thread = threading.Thread(target=download_episode, args=(file_url, TOKEN, cache_path, proxies, episode_id, title))
        download_threads[episode_id] = thread
        thread.start()

    return jsonify({"status": "Download started", "episode_id": episode_id})
249
+
250
+
251
@app.route('/api/progress/<id>', methods=['GET'])
def get_progress_api(id):
    """Endpoint to get the download progress of a movie or TV show episode."""
    return jsonify({"id": id, "progress": get_download_progress(id)})
256
+
257
@app.route('/api/filmid', methods=['GET'])
def get_film_id_by_title_api():
    """Endpoint to get the film ID by providing the movie title."""
    title = request.args.get('title')
    if title:
        return jsonify({"film_id": get_film_id(title)})
    return jsonify({"error": "Title parameter is required"}), 400
265
+
266
@app.route('/api/episodeid', methods=['GET'])
def get_episode_id_api():
    """Endpoint to get the episode ID by providing the TV show title, season, and episode."""
    title = request.args.get('title')
    season = request.args.get('season')
    episode = request.args.get('episode')
    if title and season and episode:
        return jsonify({"episode_id": encode_episodeid(title, season, episode)})
    return jsonify({"error": "Title, season, and episode parameters are required"}), 400
276
+
277
@app.route('/api/cache/size', methods=['GET'])
def get_cache_size_api():
    """Report the total size of the cache directory in human-readable form."""
    total_size = sum(
        os.path.getsize(os.path.join(root, name))
        for root, _dirs, files in os.walk(CACHE_DIR)
        for name in files
    )
    return jsonify({"cache_size": bytes_to_human_readable(total_size)})
286
+
287
@app.route('/api/cache/clear', methods=['POST'])
def clear_cache_api():
    """Delete every cached file, then re-initialize the store files.

    Bug fix: the original walk also deleted film_store.json and
    tv_store.json and never recreated them, so subsequent store reads
    (e.g. /api/film) failed with FileNotFoundError. Recreate both stores
    as empty JSON objects after clearing.
    """
    for dirpath, dirnames, filenames in os.walk(CACHE_DIR):
        for f in filenames:
            os.remove(os.path.join(dirpath, f))
    for store_path in (FILM_STORE_JSON_PATH, TV_STORE_JSON_PATH):
        with open(store_path, 'w') as json_file:
            json.dump({}, json_file)
    return jsonify({"status": "Cache cleared"})
294
+
295
@app.route('/api/tv/store', methods=['GET'])
def get_tv_store_api():
    """Endpoint to get the TV store JSON."""
    if not os.path.exists(TV_STORE_JSON_PATH):
        return jsonify({}), 404
    with open(TV_STORE_JSON_PATH, 'r') as json_file:
        return jsonify(json.load(json_file))
303
+
304
@app.route('/api/film/store', methods=['GET'])
def get_film_store_api():
    """Endpoint to get the film store JSON."""
    if not os.path.exists(FILM_STORE_JSON_PATH):
        return jsonify({}), 404
    # Local renamed from the original's misleading `tv_store_data`.
    with open(FILM_STORE_JSON_PATH, 'r') as json_file:
        film_store_data = json.load(json_file)
    return jsonify(film_store_data)
312
+
313
@app.route('/api/film/metadata', methods=['GET'])
def get_film_metadata_api():
    """Endpoint to get the film metadata by title."""
    title = request.args.get('title')
    if not title:
        return jsonify({'error': 'No title provided'}), 400

    json_cache_path = os.path.join(CACHE_DIR, f"{urllib.parse.quote(title)}.json")
    if not os.path.exists(json_cache_path):
        return jsonify({'error': 'Metadata not found'}), 404

    with open(json_cache_path, 'r') as f:
        return jsonify(json.load(f))
328
+
329
@app.route('/api/tv/metadata', methods=['GET'])
def get_tv_metadata_api():
    """Endpoint to get the TV show metadata by title."""
    title = request.args.get('title')
    if not title:
        return jsonify({'error': 'No title provided'}), 400

    json_cache_path = os.path.join(CACHE_DIR, f"{urllib.parse.quote(title)}.json")
    if not os.path.exists(json_cache_path):
        return jsonify({'error': 'Metadata not found'}), 404

    with open(json_cache_path, 'r') as f:
        data = json.load(f)

    # Attach the indexed episode layout so clients can enumerate seasons.
    tv_structure_data = get_tv_structure(file_structure, title)
    if tv_structure_data:
        data['file_structure'] = tv_structure_data
    return jsonify(data)
350
+
351
+
352
@app.route("/api/film/all")
def get_all_films_api():
    """Return every indexed film directory as a JSON array.

    Fix: wrap the list in jsonify — returning a bare list from a view is
    only supported by Flask >= 2.2, and the other endpoints in this file
    already use jsonify.
    """
    return jsonify(get_all_films(file_structure))
355
+
356
@app.route("/api/tv/all")
def get_all_tvshows_api():
    """Return every indexed TV show with its seasons and episodes."""
    all_shows = get_all_tv_shows(file_structure)
    return all_shows
359
+
360
+
361
+ # Routes
362
@app.route('/')
def index():
    """Health-check / landing route."""
    return "Server Running ..."
365
+
366
# Main entry point: run the dev server on all interfaces, port 7860.
if __name__ == "__main__":
    app.run(debug=True, host="0.0.0.0", port=7860)
app.py CHANGED
@@ -4,10 +4,7 @@ import os
4
  import json
5
  import threading
6
  import urllib.parse
7
- from hf_scrapper import download_film, download_episode, get_system_proxies, get_download_progress
8
- from indexer import indexer
9
- from tvdb import fetch_and_cache_json
10
- import re
11
 
12
  app = Flask(__name__)
13
  CORS(app)
@@ -16,144 +13,11 @@ CORS(app)
16
  CACHE_DIR = os.getenv("CACHE_DIR")
17
  INDEX_FILE = os.getenv("INDEX_FILE")
18
  TOKEN = os.getenv("TOKEN")
19
- FILM_STORE_JSON_PATH = os.path.join(CACHE_DIR, "film_store.json")
20
- TV_STORE_JSON_PATH = os.path.join(CACHE_DIR, "tv_store.json")
21
  REPO = os.getenv("REPO")
22
- download_threads = {}
 
23
 
24
- # Ensure CACHE_DIR exists
25
- if not os.path.exists(CACHE_DIR):
26
- os.makedirs(CACHE_DIR)
27
-
28
- for path in [FILM_STORE_JSON_PATH, TV_STORE_JSON_PATH]:
29
- if not os.path.exists(path):
30
- with open(path, 'w') as json_file:
31
- json.dump({}, json_file)
32
-
33
- # Index the file structure
34
- indexer()
35
-
36
- # Load the file structure JSON
37
- if not os.path.exists(INDEX_FILE):
38
- raise FileNotFoundError(f"{INDEX_FILE} not found. Please make sure the file exists.")
39
-
40
- with open(INDEX_FILE, 'r') as f:
41
- file_structure = json.load(f)
42
-
43
- # Function Definitions
44
-
45
- def load_json(file_path):
46
- """Load JSON data from a file."""
47
- with open(file_path, 'r') as file:
48
- return json.load(file)
49
-
50
- def find_movie_path(json_data, title):
51
- """Find the path of the movie in the JSON data based on the title."""
52
- for directory in json_data:
53
- if directory['type'] == 'directory' and directory['path'] == 'films':
54
- for sub_directory in directory['contents']:
55
- if sub_directory['type'] == 'directory':
56
- for item in sub_directory['contents']:
57
- if item['type'] == 'file' and title.lower() in item['path'].lower():
58
- return item['path']
59
- return None
60
-
61
- def find_tv_path(json_data, title):
62
- """Find the path of the TV show in the JSON data based on the title."""
63
- for directory in json_data:
64
- if directory['type'] == 'directory' and directory['path'] == 'tv':
65
- for sub_directory in directory['contents']:
66
- if sub_directory['type'] == 'directory' and title.lower() in sub_directory['path'].lower():
67
- return sub_directory['path']
68
- return None
69
-
70
- def get_tv_structure(json_data,title):
71
- """Find the path of the TV show in the JSON data based on the title."""
72
- for directory in json_data:
73
- if directory['type'] == 'directory' and directory['path'] == 'tv':
74
- for sub_directory in directory['contents']:
75
- if sub_directory['type'] == 'directory' and title.lower() in sub_directory['path'].lower():
76
- return sub_directory
77
- return None
78
-
79
- def get_film_id(title):
80
- """Generate a film ID based on the title."""
81
- return title.replace(" ", "_").lower()
82
-
83
- def prefetch_metadata():
84
- """Prefetch metadata for all items in the file structure."""
85
- for item in file_structure:
86
- if 'contents' in item:
87
- for sub_item in item['contents']:
88
- original_title = sub_item['path'].split('/')[-1]
89
- media_type = 'series' if item['path'].startswith('tv') else 'movie'
90
- title = original_title
91
- year = None
92
-
93
- # Extract year from the title if available
94
- match = re.search(r'\((\d{4})\)', original_title)
95
- if match:
96
- year_str = match.group(1)
97
- if year_str.isdigit() and len(year_str) == 4:
98
- title = original_title[:match.start()].strip()
99
- year = int(year_str)
100
- else:
101
- parts = original_title.rsplit(' ', 1)
102
- if len(parts) > 1 and parts[-1].isdigit() and len(parts[-1]) == 4:
103
- title = parts[0].strip()
104
- year = int(parts[-1])
105
-
106
- fetch_and_cache_json(original_title, title, media_type, year)
107
-
108
- def bytes_to_human_readable(num, suffix="B"):
109
- for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]:
110
- if abs(num) < 1024.0:
111
- return f"{num:3.1f} {unit}{suffix}"
112
- num /= 1024.0
113
- return f"{num:.1f} Y{suffix}"
114
-
115
- def encode_episodeid(title,season,episode):
116
- return f"{title}_{season}_{episode}"
117
-
118
- def get_all_tv_shows(indexed_cache):
119
- """Get all TV shows from the indexed cache structure JSON file."""
120
- tv_shows = {}
121
- for directory in indexed_cache:
122
- if directory['type'] == 'directory' and directory['path'] == 'tv':
123
- for sub_directory in directory['contents']:
124
- if sub_directory['type'] == 'directory':
125
- show_title = sub_directory['path'].split('/')[-1]
126
- tv_shows[show_title] = []
127
- for season_directory in sub_directory['contents']:
128
- if season_directory['type'] == 'directory':
129
- season = season_directory['path'].split('/')[-1]
130
- for episode in season_directory['contents']:
131
- if episode['type'] == 'file':
132
- tv_shows[show_title].append({
133
- "season": season,
134
- "episode": episode['path'].split('/')[-1],
135
- "path": episode['path']
136
- })
137
- return tv_shows
138
-
139
- def get_all_films(indexed_cache):
140
- """Get all films from the indexed cache structure JSON file."""
141
- films = []
142
- for directory in indexed_cache:
143
- if directory['type'] == 'directory' and directory['path'] == 'films':
144
- for sub_directory in directory['contents']:
145
- if sub_directory['type'] == 'directory':
146
- films.append(sub_directory['path'])
147
- return films
148
-
149
- def start_prefetching():
150
- """Start the metadata prefetching in a separate thread."""
151
- prefetch_metadata()
152
-
153
- # Start prefetching metadata
154
- thread = threading.Thread(target=start_prefetching)
155
- thread.daemon = True
156
- thread.start()
157
 
158
  # API Endpoints
159
 
@@ -165,7 +29,7 @@ def get_movie_api():
165
  return jsonify({"error": "Title parameter is required"}), 400
166
 
167
  # Load the film store JSON
168
- with open(FILM_STORE_JSON_PATH, 'r') as json_file:
169
  film_store_data = json.load(json_file)
170
 
171
  # Check if the film is already cached
@@ -174,20 +38,20 @@ def get_movie_api():
174
  if os.path.exists(cache_path):
175
  return send_from_directory(os.path.dirname(cache_path), os.path.basename(cache_path))
176
 
177
- movie_path = find_movie_path(file_structure, title)
178
 
179
  if not movie_path:
180
  return jsonify({"error": "Movie not found"}), 404
181
 
182
  cache_path = os.path.join(CACHE_DIR, movie_path)
183
  file_url = f"https://huggingface.co/{REPO}/resolve/main/{movie_path}"
184
- proxies = get_system_proxies()
185
- film_id = get_film_id(title)
186
 
187
  # Start the download in a separate thread if not already downloading
188
- if film_id not in download_threads or not download_threads[film_id].is_alive():
189
- thread = threading.Thread(target=download_film, args=(file_url, TOKEN, cache_path, proxies, film_id, title))
190
- download_threads[film_id] = thread
191
  thread.start()
192
 
193
  return jsonify({"status": "Download started", "film_id": film_id})
@@ -203,7 +67,7 @@ def get_tv_show_api():
203
  return jsonify({"error": "Title, season, and episode parameters are required"}), 400
204
 
205
  # Load the TV store JSON
206
- with open(TV_STORE_JSON_PATH, 'r') as json_file:
207
  tv_store_data = json.load(json_file)
208
 
209
  # Check if the episode is already cached
@@ -214,13 +78,13 @@ def get_tv_show_api():
214
  if os.path.exists(cache_path):
215
  return send_from_directory(os.path.dirname(cache_path), os.path.basename(cache_path))
216
 
217
- tv_path = find_tv_path(file_structure, title)
218
 
219
  if not tv_path:
220
  return jsonify({"error": "TV show not found"}), 404
221
 
222
  episode_path = None
223
- for directory in file_structure:
224
  if directory['type'] == 'directory' and directory['path'] == 'tv':
225
  for sub_directory in directory['contents']:
226
  if sub_directory['type'] == 'directory' and title.lower() in sub_directory['path'].lower():
@@ -236,13 +100,13 @@ def get_tv_show_api():
236
 
237
  cache_path = os.path.join(CACHE_DIR, episode_path)
238
  file_url = f"https://huggingface.co/{REPO}/resolve/main/{episode_path}"
239
- proxies = get_system_proxies()
240
- episode_id = encode_episodeid(title,season,episode)
241
 
242
  # Start the download in a separate thread if not already downloading
243
- if episode_id not in download_threads or not download_threads[episode_id].is_alive():
244
- thread = threading.Thread(target=download_episode, args=(file_url, TOKEN, cache_path, proxies, episode_id, title))
245
- download_threads[episode_id] = thread
246
  thread.start()
247
 
248
  return jsonify({"status": "Download started", "episode_id": episode_id})
@@ -251,7 +115,7 @@ def get_tv_show_api():
251
  @app.route('/api/progress/<id>', methods=['GET'])
252
  def get_progress_api(id):
253
  """Endpoint to get the download progress of a movie or TV show episode."""
254
- progress = get_download_progress(id)
255
  return jsonify({"id": id, "progress": progress})
256
 
257
  @app.route('/api/filmid', methods=['GET'])
@@ -260,7 +124,7 @@ def get_film_id_by_title_api():
260
  title = request.args.get('title')
261
  if not title:
262
  return jsonify({"error": "Title parameter is required"}), 400
263
- film_id = get_film_id(title)
264
  return jsonify({"film_id": film_id})
265
 
266
  @app.route('/api/episodeid', methods=['GET'])
@@ -271,7 +135,7 @@ def get_episode_id_api():
271
  episode = request.args.get('episode')
272
  if not title or not season or not episode:
273
  return jsonify({"error": "Title, season, and episode parameters are required"}), 400
274
- episode_id = encode_episodeid(title,season,episode)
275
  return jsonify({"episode_id": episode_id})
276
 
277
  @app.route('/api/cache/size', methods=['GET'])
@@ -281,7 +145,7 @@ def get_cache_size_api():
281
  for f in filenames:
282
  fp = os.path.join(dirpath, f)
283
  total_size += os.path.getsize(fp)
284
- readable_size = bytes_to_human_readable(total_size)
285
  return jsonify({"cache_size": readable_size})
286
 
287
  @app.route('/api/cache/clear', methods=['POST'])
@@ -295,17 +159,17 @@ def clear_cache_api():
295
  @app.route('/api/tv/store', methods=['GET'])
296
  def get_tv_store_api():
297
  """Endpoint to get the TV store JSON."""
298
- if os.path.exists(TV_STORE_JSON_PATH):
299
- with open(TV_STORE_JSON_PATH, 'r') as json_file:
300
  tv_store_data = json.load(json_file)
301
  return jsonify(tv_store_data)
302
  return jsonify({}), 404
303
 
304
  @app.route('/api/film/store', methods=['GET'])
305
  def get_film_store_api():
306
- """Endpoint to get the TV store JSON."""
307
- if os.path.exists(FILM_STORE_JSON_PATH):
308
- with open(FILM_STORE_JSON_PATH, 'r') as json_file:
309
  tv_store_data = json.load(json_file)
310
  return jsonify(tv_store_data)
311
  return jsonify({}), 404
@@ -340,7 +204,7 @@ def get_tv_metadata_api():
340
  data = json.load(f)
341
 
342
  # Add the file structure to the metadata
343
- tv_structure_data = get_tv_structure(file_structure, title)
344
  if tv_structure_data:
345
  data['file_structure'] = tv_structure_data
346
 
@@ -351,17 +215,17 @@ def get_tv_metadata_api():
351
 
352
  @app.route("/api/film/all")
353
  def get_all_films_api():
354
- return get_all_films(file_structure)
355
 
356
  @app.route("/api/tv/all")
357
  def get_all_tvshows_api():
358
- return get_all_tv_shows(file_structure)
359
 
360
 
361
  # Routes
362
  @app.route('/')
363
  def index():
364
- return "Server Running"
365
 
366
  # Main entry point
367
  if __name__ == "__main__":
 
4
  import json
5
  import threading
6
  import urllib.parse
7
+ from Instance import Instance
 
 
 
8
 
9
  app = Flask(__name__)
10
  CORS(app)
 
13
  CACHE_DIR = os.getenv("CACHE_DIR")
14
  INDEX_FILE = os.getenv("INDEX_FILE")
15
  TOKEN = os.getenv("TOKEN")
 
 
16
  REPO = os.getenv("REPO")
17
+ ID = os.getenv("ID")
18
+ URL = os.getenv("URL")
19
 
20
+ instance = Instance(id=ID, url=URL, cache_dir=CACHE_DIR, index_file=INDEX_FILE, token=TOKEN, repo=REPO)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21
 
22
  # API Endpoints
23
 
 
29
  return jsonify({"error": "Title parameter is required"}), 400
30
 
31
  # Load the film store JSON
32
+ with open(instance.FILM_STORE_JSON_PATH, 'r') as json_file:
33
  film_store_data = json.load(json_file)
34
 
35
  # Check if the film is already cached
 
38
  if os.path.exists(cache_path):
39
  return send_from_directory(os.path.dirname(cache_path), os.path.basename(cache_path))
40
 
41
+ movie_path = instance.find_movie_path(instance.file_structure, title)
42
 
43
  if not movie_path:
44
  return jsonify({"error": "Movie not found"}), 404
45
 
46
  cache_path = os.path.join(CACHE_DIR, movie_path)
47
  file_url = f"https://huggingface.co/{REPO}/resolve/main/{movie_path}"
48
+ proxies = instance.get_system_proxies()
49
+ film_id = instance.get_film_id(title)
50
 
51
  # Start the download in a separate thread if not already downloading
52
+ if film_id not in instance.download_threads or not instance.download_threads[film_id].is_alive():
53
+ thread = threading.Thread(target=instance.download_film, args=(file_url, TOKEN, cache_path, proxies, film_id, title))
54
+ instance.download_threads[film_id] = thread
55
  thread.start()
56
 
57
  return jsonify({"status": "Download started", "film_id": film_id})
 
67
  return jsonify({"error": "Title, season, and episode parameters are required"}), 400
68
 
69
  # Load the TV store JSON
70
+ with open(instance.TV_STORE_JSON_PATH, 'r') as json_file:
71
  tv_store_data = json.load(json_file)
72
 
73
  # Check if the episode is already cached
 
78
  if os.path.exists(cache_path):
79
  return send_from_directory(os.path.dirname(cache_path), os.path.basename(cache_path))
80
 
81
+ tv_path = instance.find_tv_path(instance.file_structure, title)
82
 
83
  if not tv_path:
84
  return jsonify({"error": "TV show not found"}), 404
85
 
86
  episode_path = None
87
+ for directory in instance.file_structure:
88
  if directory['type'] == 'directory' and directory['path'] == 'tv':
89
  for sub_directory in directory['contents']:
90
  if sub_directory['type'] == 'directory' and title.lower() in sub_directory['path'].lower():
 
100
 
101
  cache_path = os.path.join(CACHE_DIR, episode_path)
102
  file_url = f"https://huggingface.co/{REPO}/resolve/main/{episode_path}"
103
+ proxies = instance.get_system_proxies()
104
+ episode_id = instance.encode_episodeid(title,season,episode)
105
 
106
  # Start the download in a separate thread if not already downloading
107
+ if episode_id not in instance.download_threads or not instance.download_threads[episode_id].is_alive():
108
+ thread = threading.Thread(target=instance.download_episode, args=(file_url, TOKEN, cache_path, proxies, episode_id, title))
109
+ instance.download_threads[episode_id] = thread
110
  thread.start()
111
 
112
  return jsonify({"status": "Download started", "episode_id": episode_id})
 
115
  @app.route('/api/progress/<id>', methods=['GET'])
116
  def get_progress_api(id):
117
  """Endpoint to get the download progress of a movie or TV show episode."""
118
+ progress = instance.get_download_progress(id)
119
  return jsonify({"id": id, "progress": progress})
120
 
121
  @app.route('/api/filmid', methods=['GET'])
 
124
  title = request.args.get('title')
125
  if not title:
126
  return jsonify({"error": "Title parameter is required"}), 400
127
+ film_id = instance.get_film_id(title)
128
  return jsonify({"film_id": film_id})
129
 
130
  @app.route('/api/episodeid', methods=['GET'])
 
135
  episode = request.args.get('episode')
136
  if not title or not season or not episode:
137
  return jsonify({"error": "Title, season, and episode parameters are required"}), 400
138
+ episode_id = instance.encode_episodeid(title,season,episode)
139
  return jsonify({"episode_id": episode_id})
140
 
141
  @app.route('/api/cache/size', methods=['GET'])
 
145
  for f in filenames:
146
  fp = os.path.join(dirpath, f)
147
  total_size += os.path.getsize(fp)
148
+ readable_size = instance.bytes_to_human_readable(total_size)
149
  return jsonify({"cache_size": readable_size})
150
 
151
  @app.route('/api/cache/clear', methods=['POST'])
 
159
  @app.route('/api/tv/store', methods=['GET'])
160
  def get_tv_store_api():
161
  """Endpoint to get the TV store JSON."""
162
+ if os.path.exists(instance.TV_STORE_JSON_PATH):
163
+ with open(instance.TV_STORE_JSON_PATH, 'r') as json_file:
164
  tv_store_data = json.load(json_file)
165
  return jsonify(tv_store_data)
166
  return jsonify({}), 404
167
 
168
  @app.route('/api/film/store', methods=['GET'])
169
  def get_film_store_api():
170
+ """Endpoint to get the film store JSON."""
171
+ if os.path.exists(instance.FILM_STORE_JSON_PATH):
172
+ with open(instance.FILM_STORE_JSON_PATH, 'r') as json_file:
173
  tv_store_data = json.load(json_file)
174
  return jsonify(tv_store_data)
175
  return jsonify({}), 404
 
204
  data = json.load(f)
205
 
206
  # Add the file structure to the metadata
207
+ tv_structure_data = instance.get_tv_structure(instance.file_structure, title)
208
  if tv_structure_data:
209
  data['file_structure'] = tv_structure_data
210
 
 
215
 
216
  @app.route("/api/film/all")
217
  def get_all_films_api():
218
+ return instance.get_all_films(instance.file_structure)
219
 
220
  @app.route("/api/tv/all")
221
  def get_all_tvshows_api():
222
+ return instance.get_all_tv_shows(instance.file_structure)
223
 
224
 
225
  # Routes
226
  @app.route('/')
227
  def index():
228
+ return "Server Running ..."
229
 
230
  # Main entry point
231
  if __name__ == "__main__":