Vijish committed on
Commit
5272442
1 Parent(s): 9c20444

Update config.py

Browse files
Files changed (1) hide show
  1. config.py +21 -54
config.py CHANGED
@@ -3,7 +3,6 @@ import sys
3
  import torch
4
  from multiprocessing import cpu_count
5
 
6
-
7
  class Config:
8
  def __init__(self):
9
  self.device = "cuda:0"
@@ -11,54 +10,13 @@ class Config:
11
  self.n_cpu = 0
12
  self.gpu_name = None
13
  self.gpu_mem = None
14
- (
15
- self.python_cmd,
16
- self.listen_port,
17
- self.iscolab,
18
- self.noparallel,
19
- self.noautoopen,
20
- ) = self.arg_parse()
21
  self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
22
 
23
- @staticmethod
24
- def arg_parse() -> tuple:
25
- exe = sys.executable or "python"
26
- parser = argparse.ArgumentParser()
27
- parser.add_argument("--port", type=int, default=7865, help="Listen port")
28
- parser.add_argument("--pycmd", type=str, default=exe, help="Python command")
29
- parser.add_argument("--colab", action="store_true", help="Launch in colab")
30
- parser.add_argument(
31
- "--noparallel", action="store_true", help="Disable parallel processing"
32
- )
33
- parser.add_argument(
34
- "--noautoopen",
35
- action="store_true",
36
- help="Do not open in browser automatically",
37
- )
38
- cmd_opts = parser.parse_args()
39
-
40
- cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865
41
-
42
- return (
43
- cmd_opts.pycmd,
44
- cmd_opts.port,
45
- cmd_opts.colab,
46
- cmd_opts.noparallel,
47
- cmd_opts.noautoopen,
48
- )
49
-
50
- # has_mps is only available in nightly pytorch (for now) and MasOS 12.3+.
51
- # check `getattr` and try it for compatibility
52
- @staticmethod
53
- def has_mps() -> bool:
54
- if not torch.backends.mps.is_available():
55
- return False
56
- try:
57
- torch.zeros(1).to(torch.device("mps"))
58
- return True
59
- except Exception:
60
- return False
61
-
62
  def device_config(self) -> tuple:
63
  if torch.cuda.is_available():
64
  i_device = int(self.device.split(":")[-1])
@@ -69,17 +27,14 @@ class Config:
69
  or "1060" in self.gpu_name
70
  or "1070" in self.gpu_name
71
  or "1080" in self.gpu_name
 
72
  ):
73
  print("Found GPU", self.gpu_name, ", force to fp32")
74
  self.is_half = False
75
  else:
76
  print("Found GPU", self.gpu_name)
77
  self.gpu_mem = int(
78
- torch.cuda.get_device_properties(i_device).total_memory
79
- / 1024
80
- / 1024
81
- / 1024
82
- + 0.4
83
  )
84
  elif self.has_mps():
85
  print("No supported Nvidia GPU found, use MPS instead")
@@ -106,10 +61,22 @@ class Config:
106
  x_center = 38
107
  x_max = 41
108
 
109
- if self.gpu_mem != None and self.gpu_mem <= 4:
110
  x_pad = 1
111
  x_query = 5
112
  x_center = 30
113
  x_max = 32
114
 
115
- return x_pad, x_query, x_center, x_max
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  import torch
4
  from multiprocessing import cpu_count
5
 
 
6
  class Config:
7
  def __init__(self):
8
  self.device = "cuda:0"
 
10
  self.n_cpu = 0
11
  self.gpu_name = None
12
  self.gpu_mem = None
13
+ self.python_cmd = "python"
14
+ self.listen_port = 7865
15
+ self.iscolab = False
16
+ self.noparallel = False
17
+ self.noautoopen = False
 
 
18
  self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
19
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  def device_config(self) -> tuple:
21
  if torch.cuda.is_available():
22
  i_device = int(self.device.split(":")[-1])
 
27
  or "1060" in self.gpu_name
28
  or "1070" in self.gpu_name
29
  or "1080" in self.gpu_name
30
+ or "T4" in self.gpu_name.upper() # Add this line to check for T4 GPU
31
  ):
32
  print("Found GPU", self.gpu_name, ", force to fp32")
33
  self.is_half = False
34
  else:
35
  print("Found GPU", self.gpu_name)
36
  self.gpu_mem = int(
37
+ torch.cuda.get_device_properties(i_device).total_memory / 1024 / 1024 / 1024 + 0.4
 
 
 
 
38
  )
39
  elif self.has_mps():
40
  print("No supported Nvidia GPU found, use MPS instead")
 
61
  x_center = 38
62
  x_max = 41
63
 
64
+ if self.gpu_mem is not None and self.gpu_mem <= 4:
65
  x_pad = 1
66
  x_query = 5
67
  x_center = 30
68
  x_max = 32
69
 
70
+ return x_pad, x_query, x_center, x_max
71
+
72
+ # has_mps is only available in nightly pytorch (for now) and macOS 12.3+.
73
+ # check `getattr` and try it for compatibility
74
+ @staticmethod
75
+ def has_mps() -> bool:
76
+ if not torch.backends.mps.is_available():
77
+ return False
78
+ try:
79
+ torch.zeros(1).to(torch.device("mps"))
80
+ return True
81
+ except Exception:
82
+ return False