alessandro trinca tornidor committed on
Commit 8e6ebf2 · 1 Parent(s): cd399d1

remove use of @session_logger.set_uuid_logging decorator

app.py CHANGED
@@ -5,31 +5,20 @@ import sys
 from pathlib import Path
 
 import gradio as gr
-import structlog
 import uvicorn
-from dotenv import load_dotenv
 from fastapi import FastAPI
 from fastapi.responses import FileResponse
 from fastapi.staticfiles import StaticFiles
 from fastapi.templating import Jinja2Templates
 from samgis_core.utilities import create_folders_if_not_exists
 from samgis_core.utilities import frontend_builder
-from samgis_core.utilities.session_logger import setup_logging
 # for some reason this do
 from spaces import GPU as SPACES_GPU
 
+from lisa_on_cuda import routes, app_logger, project_root_folder, workdir
 from lisa_on_cuda.utils import app_helpers
-from lisa_on_cuda import routes
 
 
-load_dotenv()
-project_root_folder = Path(globals().get("__file__", "./_")).absolute().parent
-workdir = os.getenv("WORKDIR", project_root_folder)
-model_folder = Path(project_root_folder / "machine_learning_models")
-
-log_level = os.getenv("LOG_LEVEL", "INFO")
-setup_logging(log_level=log_level)
-app_logger = structlog.stdlib.get_logger()
 app_logger.info(f"PROJECT_ROOT_FOLDER:{project_root_folder}, WORKDIR:{workdir}.")
 
 folders_map = os.getenv("FOLDERS_MAP", "{}")
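With this hunk app.py stops bootstrapping dotenv and structlog itself and instead imports the shared app_logger, project_root_folder and workdir from the lisa_on_cuda package, next to routes. A minimal sketch of the resulting wiring, assuming the FastAPI instance is named app and that routes.router is mounted with include_router (neither detail is shown in this hunk):

# sketch: consuming the package-level objects introduced by this commit
# (the FastAPI instance name and the include_router call are assumptions for illustration)
from fastapi import FastAPI

from lisa_on_cuda import routes, app_logger, project_root_folder, workdir

app = FastAPI()
app.include_router(routes.router)
app_logger.info(f"PROJECT_ROOT_FOLDER:{project_root_folder}, WORKDIR:{workdir}.")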
lisa_on_cuda/__init__.py CHANGED
@@ -0,0 +1,16 @@
+import os
+from pathlib import Path
+
+import structlog
+from dotenv import load_dotenv
+from samgis_core.utilities.session_logger import setup_logging
+
+
+load_dotenv()
+project_root_folder = Path(globals().get("__file__", "./_")).absolute().parent
+workdir = os.getenv("WORKDIR", project_root_folder)
+model_folder = Path(project_root_folder / "machine_learning_models")
+
+log_level = os.getenv("LOG_LEVEL", "INFO")
+setup_logging(log_level=log_level)
+app_logger = structlog.stdlib.get_logger()
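The new package __init__.py now performs the one-time setup that app.py and routes.py used to repeat: load_dotenv(), path resolution, and structlog configuration via samgis_core's setup_logging, exporting app_logger, project_root_folder, workdir and model_folder at package level. A minimal sketch of how a downstream module consumes these exports after this commit; the helper function and its log message are illustrative only:

# sketch: reusing the already-configured package logger instead of configuring one per module
from lisa_on_cuda import app_logger, model_folder


def log_model_folder() -> None:
    # hypothetical helper, not part of the diff
    app_logger.info(f"machine learning models expected under: {model_folder}.")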
lisa_on_cuda/routes.py CHANGED
@@ -1,18 +1,10 @@
 import json
-import os
 
-import structlog
-from dotenv import load_dotenv
 from fastapi import APIRouter
 
-from samgis_core.utilities.session_logger import setup_logging
+from lisa_on_cuda import app_logger
 
 
-load_dotenv()
-
-log_level = os.getenv("LOG_LEVEL", "INFO")
-setup_logging(log_level=log_level)
-app_logger = structlog.stdlib.get_logger()
 router = APIRouter()
 
 
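routes.py now keeps only what it needs (json, APIRouter and the shared app_logger) instead of repeating the dotenv/structlog bootstrap. A small sketch of a handler on this router using the shared logger; the /health path and its payload are hypothetical, not taken from the repository:

# sketch: a hypothetical endpoint on the existing router, logging through the shared app_logger
import json

from fastapi import APIRouter

from lisa_on_cuda import app_logger

router = APIRouter()


@router.get("/health")
def health() -> str:
    # hypothetical handler, for illustration only
    app_logger.info("health check requested.")
    return json.dumps({"msg": "ok"})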
lisa_on_cuda/utils/app_helpers.py CHANGED
@@ -3,6 +3,7 @@ import logging
 import os
 import re
 from typing import Callable
+
 import cv2
 import gradio as gr
 import nh3
@@ -11,18 +12,16 @@ import torch
 import torch.nn.functional as F
 from transformers import AutoTokenizer, BitsAndBytesConfig, CLIPImageProcessor
 
-from . import constants, session_logger, utils
+from lisa_on_cuda import app_logger
 from lisa_on_cuda.LISA import LISAForCausalLM
 from lisa_on_cuda.llava import conversation as conversation_lib
 from lisa_on_cuda.llava.mm_utils import tokenizer_image_token
 from lisa_on_cuda.segment_anything.utils.transforms import ResizeLongestSide
-
+from . import constants, utils
 
 placeholders = utils.create_placeholder_variables()
-app_logger = logging.getLogger(__name__)
 
 
-@session_logger.set_uuid_logging
 def parse_args(args_to_parse, internal_logger=None):
     if internal_logger is None:
         internal_logger = app_logger
@@ -56,7 +55,6 @@ def parse_args(args_to_parse, internal_logger=None):
     return parser.parse_args(args_to_parse)
 
 
-@session_logger.set_uuid_logging
 def get_cleaned_input(input_str, internal_logger=None):
     if internal_logger is None:
         internal_logger = app_logger
@@ -89,7 +87,6 @@ def get_cleaned_input(input_str, internal_logger=None):
     return input_str
 
 
-@session_logger.set_uuid_logging
 def set_image_precision_by_args(input_image, precision):
     if precision == "bf16":
         input_image = input_image.bfloat16()
@@ -100,7 +97,6 @@ def set_image_precision_by_args(input_image, precision):
     return input_image
 
 
-@session_logger.set_uuid_logging
 def preprocess(
         x,
         pixel_mean=torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1),
@@ -120,7 +116,6 @@ def preprocess(
     return x
 
 
-@session_logger.set_uuid_logging
 def load_model_for_causal_llm_pretrained(
         version, torch_dtype, load_in_8bit, load_in_4bit, seg_token_idx, vision_tower,
         internal_logger: logging = None
@@ -166,7 +161,6 @@ def load_model_for_causal_llm_pretrained(
     return _model
 
 
-@session_logger.set_uuid_logging
 def get_model(args_to_parse, internal_logger: logging = None, inference_decorator: Callable = None):
     if internal_logger is None:
         internal_logger = app_logger
@@ -238,7 +232,6 @@ def get_model(args_to_parse, internal_logger: logging = None, inference_decorato
     return _model, _clip_image_processor, _tokenizer, _transform
 
 
-@session_logger.set_uuid_logging
 def prepare_model_vision_tower(_model, args_to_parse, torch_dtype, internal_logger: logging = None):
     if internal_logger is None:
         internal_logger = app_logger
@@ -272,7 +265,6 @@ def prepare_model_vision_tower(_model, args_to_parse, torch_dtype, internal_logg
    return _model, vision_tower
 
 
-@session_logger.set_uuid_logging
 def get_inference_model_by_args(args_to_parse, internal_logger0: logging = None, inference_decorator: Callable = None):
     if internal_logger0 is None:
         internal_logger0 = app_logger
@@ -281,7 +273,6 @@ def get_inference_model_by_args(args_to_parse, internal_logger0: logging = None,
     internal_logger0.info("created model, preparing inference function")
     no_seg_out = placeholders["no_seg_out"]
 
-    @session_logger.set_uuid_logging
     def inference(
             input_str: str,
             input_image: str | np.ndarray,
@@ -408,7 +399,6 @@ def get_inference_model_by_args(args_to_parse, internal_logger0: logging = None,
     return inference
 
 
-@session_logger.set_uuid_logging
 def get_gradio_interface(
         fn_inference: Callable,
         args: str = None
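Across app_helpers.py the change is uniform: every @session_logger.set_uuid_logging decorator is removed, the module-local logging.getLogger(__name__) is dropped, and the package-level structlog app_logger becomes the value each internal_logger=None parameter falls back to. A minimal sketch of that fallback pattern, plus one hedged option for keeping request-scoped IDs without the removed decorator; handle_request, request_id and the contextvars binding are assumptions for illustration, not something this commit adds:

# sketch: the internal_logger fallback the refactored helpers share, plus optional
# request-scoped context via structlog.contextvars (assumption, not in the diff)
import uuid

import structlog

from lisa_on_cuda import app_logger


def clean_input_sketch(input_str: str, internal_logger=None) -> str:
    # same pattern as parse_args, get_cleaned_input, get_model, ...:
    # callers may inject a logger, otherwise the shared app_logger is used
    if internal_logger is None:
        internal_logger = app_logger
    internal_logger.info(f"cleaning input of length {len(input_str)}.")
    return input_str.strip()


def handle_request(payload: str) -> str:
    # hypothetical entry point: bind a per-request id so log lines stay correlated,
    # provided setup_logging keeps structlog's merge_contextvars processor enabled
    structlog.contextvars.bind_contextvars(request_id=str(uuid.uuid4()))
    try:
        return clean_input_sketch(payload)
    finally:
        structlog.contextvars.clear_contextvars()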