lyangas committed
Commit 185291e · 1 Parent(s): 9ac5368

unused files removed

Files changed (3)
  1. helpers/firebase.py +0 -148
  2. helpers/gcloud.py +0 -98
  3. pyproject.toml +0 -4
helpers/firebase.py DELETED
@@ -1,148 +0,0 @@
-import firebase_admin
-from firebase_admin import credentials
-from firebase_admin import firestore
-
-
-class FirebaseClient:
-    def __init__(self, path_to_certificate):
-        # Initialize Firebase Admin SDK
-        cred = credentials.Certificate(path_to_certificate)  # Path to your service account key JSON file
-        firebase_admin.initialize_app(cred)
-
-        # Initialize Firestore database
-        self.db = firestore.client()
-
-    def add_task(self, task_data):
-        """
-        Add a new task to Firestore.
-
-        Args:
-            task_data (dict): Dictionary containing task data.
-                Example: {'title': 'Task Title', 'description': 'Task Description', 'status': 'pending'}
-        """
-        # Add task data to Firestore
-        doc_ref = self.db.collection('tasks').document()
-        doc_ref.set(task_data)
-        return doc_ref.id
-
-    def get_task_by_status(self, status):
-        # Reference to the tasks collection
-        tasks_ref = self.db.collection('tasks')
-
-        # Query tasks with the specified status
-        query = tasks_ref.where('status', '==', status)
-
-        # Get documents that match the query
-        pending_tasks = query.stream()
-
-        # Convert documents to dictionaries
-        pending_tasks_data = []
-        for doc in pending_tasks:
-            task_data = doc.to_dict()
-            task_data['id'] = doc.id
-            pending_tasks_data.append(task_data)
-
-        return pending_tasks_data
-
-    def get_all_tasks(self):
-        """
-        Retrieve all tasks from Firestore.
-
-        Returns:
-            list: A list containing dictionaries, each representing a task.
-        """
-        # Reference to the 'tasks' collection
-        tasks_ref = self.db.collection('tasks')
-
-        # Get all documents in the collection
-        docs = tasks_ref.stream()
-
-        # Initialize an empty list to store tasks
-        tasks = []
-
-        # Iterate over each document and add it to the tasks list
-        for doc in docs:
-            doc_dict = doc.to_dict()
-            doc_dict['id'] = doc.id
-            tasks.append(doc_dict)
-
-        return tasks
-
-    def update(self, task_id, data):
-        """
-        Update fields of a task document.
-
-        Args:
-            task_id (str): ID of the task to update.
-            data (dict): Fields and values to set on the task.
-        """
-        # Reference to the task document
-        task_ref = self.db.collection('tasks').document(task_id)
-
-        # Apply the update to the task document
-        task_ref.update(data)
-
-    def delete_task(self, task_id):
-        """
-        Delete a task from Firestore by its ID.
-
-        Args:
-            task_id (str): ID of the task to be deleted.
-        """
-        # Reference to the task document
-        task_ref = self.db.collection('tasks').document(task_id)
-
-        # Delete the task document
-        task_ref.delete()
-
-    def get_task_by_id(self, task_id):
-        """
-        Retrieve a task from Firestore by its ID.
-
-        Args:
-            task_id (str): ID of the task to be retrieved.
-
-        Returns:
-            dict or None: Dictionary containing the task data if found, None otherwise.
-        """
-        # Reference to the task document
-        task_ref = self.db.collection('tasks').document(task_id)
-
-        # Retrieve the task document
-        task_doc = task_ref.get()
-
-        # Check if the task document exists
-        if task_doc.exists:
-            return task_doc.to_dict()
-        else:
-            return None
-
-    def find_tasks_by_status(self, status):
-        """
-        Find all tasks in Firestore with the specified status.
-
-        Args:
-            status (str): Status value to filter tasks by.
-
-        Returns:
-            list: List of dictionaries containing task data.
-        """
-        # Reference to the 'tasks' collection
-        tasks_ref = self.db.collection('tasks')
-
-        # Query tasks with the specified status
-        query = tasks_ref.where('status', '==', status)
-
-        # Get documents that match the query
-        docs = query.stream()
-
-        # Initialize an empty list to store tasks
-        tasks = []
-
-        # Iterate over each document and add it to the tasks list
-        for doc in docs:
-            task = doc.to_dict()
-            task['id'] = doc.id
-            tasks.append(task)
-
-        return tasks
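
For reference, here is a minimal, hypothetical sketch of how the removed FirebaseClient was typically driven; the certificate path and task fields are illustrative only, not taken from the repository:

# Hypothetical usage of the now-removed helper; the path and field values are made up.
from helpers.firebase import FirebaseClient

client = FirebaseClient("service-account.json")  # path to a service account key JSON file
task_id = client.add_task({"title": "Demo", "description": "Example task", "status": "pending"})
pending = client.get_task_by_status("pending")   # list of dicts, each with an added 'id' key
client.update(task_id, {"status": "done"})       # set arbitrary fields on the task document
print(client.get_task_by_id(task_id))            # dict, or None if the document does not exist
client.delete_task(task_id)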
helpers/gcloud.py DELETED
@@ -1,98 +0,0 @@
-import os
-from google.cloud import storage
-from tqdm import tqdm
-from googleapiclient import discovery
-import requests
-
-
-service = discovery.build('compute', 'v1')
-storage_client = storage.Client()
-
-def download_csv_from_gcloud(bucket_name, object_name, destination_file_path):
-    """Download a file from Google Cloud Storage."""
-
-    bucket = storage_client.bucket(bucket_name)
-    blob = bucket.blob(object_name)
-
-    # Download the file to a local path
-    blob.download_to_filename(destination_file_path)
-    print(f"File {object_name} downloaded to {destination_file_path}")
-
-def upload_folder_to_gcloud(bucket_name, source_folder_path, destination_folder_name):
-    """Uploads all files in a folder to the Google Cloud Storage bucket."""
-    # Instantiates a client
-    # storage_client = storage.Client()
-
-    # Gets the bucket
-    print(f"bucket_name={bucket_name}, source_folder_path={source_folder_path}, destination_folder_name={destination_folder_name}", flush=True)
-    bucket = storage_client.bucket(bucket_name)
-
-    # Walk through the folder and upload each file
-    for root, _, files in os.walk(source_folder_path):
-        for file_name in files:
-            # Construct the local file path
-            local_file_path = os.path.join(root, file_name)
-
-            # Construct the destination blob name
-            destination_blob_name = os.path.join(destination_folder_name, os.path.relpath(local_file_path, source_folder_path))
-            print(f"destination_blob_name={destination_blob_name}")
-            # Upload the file
-            blob = bucket.blob(destination_blob_name)
-            blob.upload_from_filename(local_file_path)
-
-            print(f"File {local_file_path} uploaded to {destination_blob_name}.")
-
-
-def download_folder(bucket_name, folder_name, destination_directory):
-    """
-    Download the contents of a folder from a Google Cloud Storage bucket to a local directory.
-
-    Args:
-        bucket_name (str): Name of the Google Cloud Storage bucket.
-        folder_name (str): Name of the folder in the bucket to download.
-        destination_directory (str): Local directory to save the downloaded files.
-    """
-
-    # Get the bucket
-    bucket = storage_client.get_bucket(bucket_name)
-
-    # List objects in the folder
-    blobs = bucket.list_blobs(prefix=folder_name)
-
-    # Ensure destination directory exists
-    os.makedirs(destination_directory, exist_ok=True)
-
-    # Iterate over each object in the folder
-    for blob in tqdm(blobs, desc=f'Downloading {folder_name}'):
-        # Determine local file path
-        local_file_path = os.path.join(destination_directory, os.path.relpath(blob.name, folder_name))
-
-        # Ensure local directory exists
-        os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
-
-        # Download the object to a local file
-        blob.download_to_filename(local_file_path)
-
-
-def start_vm(project, zone, instance):
-    request = service.instances().start(project=project, zone=zone, instance=instance)
-    response = request.execute()
-    return response
-
-def stop_vm(project, zone, instance):
-    request = service.instances().stop(project=project, zone=zone, instance=instance)
-    response = request.execute()
-    return response
-
-def get_current_instance_name():
-    # URL for the metadata server
-    METADATA_URL = "http://metadata.google.internal/computeMetadata/v1/instance/name"
-    HEADERS = {"Metadata-Flavor": "Google"}
-    try:
-        response = requests.get(METADATA_URL, headers=HEADERS)
-        response.raise_for_status()  # Raise an error for bad status codes
-        instance_name = response.text
-        return instance_name
-    except requests.exceptions.RequestException as e:
-        print(f"Error fetching instance name: {e}")
-        return None
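
For reference, a minimal, hypothetical sketch of how the removed Cloud Storage and Compute Engine helpers were called; the bucket, paths, project, and zone are illustrative only, not taken from the repository:

# Hypothetical usage of the now-removed helpers; all names below are made up.
from helpers.gcloud import (
    download_csv_from_gcloud,
    upload_folder_to_gcloud,
    download_folder,
    get_current_instance_name,
    stop_vm,
)

download_csv_from_gcloud("my-bucket", "data/input.csv", "/tmp/input.csv")
upload_folder_to_gcloud("my-bucket", "./results", "experiments/run-01")
download_folder("my-bucket", "experiments/run-01", "./downloads")

# On a Compute Engine VM, the instance name comes from the metadata server,
# so a worker can stop its own VM when it finishes.
instance = get_current_instance_name()
if instance:
    stop_vm("my-project", "us-central1-a", instance)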
pyproject.toml CHANGED
@@ -20,11 +20,7 @@ matplotlib = "3.8.2"
 plotly = "5.18.0"
 protobuf = "4.25.2"
 tqdm = "4.66.1"
-google-cloud-storage = "^2.14.0"
 tinydb = "^4.8.0"
-uvicorn = "^0.27.0.post1"
-fastapi = "^0.109.2"
-firebase-admin = "^6.5.0"
 
 [tool.poetry.dev-dependencies]
 
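
As a side note, dependency cleanups like this are usually done with Poetry itself so that the lock file stays in sync; a command along these lines (package names mirror the removed entries above) would produce the same pyproject.toml change:

poetry remove google-cloud-storage uvicorn fastapi firebase-admin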