Ilzhabimantara committed on
Commit
6a9452a
·
verified ·
1 Parent(s): 891bd38

Update config.py

Browse files
Files changed (1) hide show
  1. config.py +28 -21
config.py CHANGED
@@ -1,5 +1,4 @@
1
  import argparse
2
- import sys
3
  import torch
4
  from multiprocessing import cpu_count
5
 
@@ -11,38 +10,45 @@ class Config:
11
  self.gpu_name = None
12
  self.gpu_mem = None
13
  (
 
 
14
  self.colab,
15
- self.api,
16
- self.unsupported
 
17
  ) = self.arg_parse()
18
  self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
19
 
20
  @staticmethod
21
  def arg_parse() -> tuple:
22
  parser = argparse.ArgumentParser()
 
 
 
 
23
  parser.add_argument("--colab", action="store_true", help="Launch in colab")
 
 
 
 
 
 
 
 
24
  parser.add_argument("--api", action="store_true", help="Launch with api")
25
- parser.add_argument("--unsupported", action="store_true", help="Enable unsupported feature")
26
  cmd_opts = parser.parse_args()
27
 
 
 
28
  return (
 
 
29
  cmd_opts.colab,
30
- cmd_opts.api,
31
- cmd_opts.unsupported
 
32
  )
33
 
34
- # has_mps is only available in nightly pytorch (for now) and MasOS 12.3+.
35
- # check `getattr` and try it for compatibility
36
- @staticmethod
37
- def has_mps() -> bool:
38
- if not torch.backends.mps.is_available():
39
- return False
40
- try:
41
- torch.zeros(1).to(torch.device("mps"))
42
- return True
43
- except Exception:
44
- return False
45
-
46
  def device_config(self) -> tuple:
47
  if torch.cuda.is_available():
48
  i_device = int(self.device.split(":")[-1])
@@ -54,10 +60,11 @@ class Config:
54
  or "1070" in self.gpu_name
55
  or "1080" in self.gpu_name
56
  ):
57
- print("INFO: Found GPU", self.gpu_name, ", force to fp32")
58
  self.is_half = False
 
59
  else:
60
- print("INFO: Found GPU", self.gpu_name)
61
  self.gpu_mem = int(
62
  torch.cuda.get_device_properties(i_device).total_memory
63
  / 1024
@@ -65,7 +72,7 @@ class Config:
65
  / 1024
66
  + 0.4
67
  )
68
- elif self.has_mps():
69
  print("INFO: No supported Nvidia GPU found, use MPS instead")
70
  self.device = "mps"
71
  self.is_half = False
 
1
  import argparse
 
2
  import torch
3
  from multiprocessing import cpu_count
4
 
 
10
  self.gpu_name = None
11
  self.gpu_mem = None
12
  (
13
+ self.python_cmd,
14
+ self.listen_port,
15
  self.colab,
16
+ self.noparallel,
17
+ self.noautoopen,
18
+ self.api
19
  ) = self.arg_parse()
20
  self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
21
 
22
  @staticmethod
23
  def arg_parse() -> tuple:
24
  parser = argparse.ArgumentParser()
25
+ parser.add_argument("--port", type=int, default=7865, help="Listen port")
26
+ parser.add_argument(
27
+ "--pycmd", type=str, default="python", help="Python command"
28
+ )
29
  parser.add_argument("--colab", action="store_true", help="Launch in colab")
30
+ parser.add_argument(
31
+ "--noparallel", action="store_true", help="Disable parallel processing"
32
+ )
33
+ parser.add_argument(
34
+ "--noautoopen",
35
+ action="store_true",
36
+ help="Do not open in browser automatically",
37
+ )
38
  parser.add_argument("--api", action="store_true", help="Launch with api")
 
39
  cmd_opts = parser.parse_args()
40
 
41
+ cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865
42
+
43
  return (
44
+ cmd_opts.pycmd,
45
+ cmd_opts.port,
46
  cmd_opts.colab,
47
+ cmd_opts.noparallel,
48
+ cmd_opts.noautoopen,
49
+ cmd_opts.api
50
  )
51
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  def device_config(self) -> tuple:
53
  if torch.cuda.is_available():
54
  i_device = int(self.device.split(":")[-1])
 
60
  or "1070" in self.gpu_name
61
  or "1080" in self.gpu_name
62
  ):
63
+ print("16 series/10 series graphics cards and P40 force single precision")
64
  self.is_half = False
65
+
66
  else:
67
+ self.gpu_name = None
68
  self.gpu_mem = int(
69
  torch.cuda.get_device_properties(i_device).total_memory
70
  / 1024
 
72
  / 1024
73
  + 0.4
74
  )
75
+ elif torch.backends.mps.is_available():
76
  print("INFO: No supported Nvidia GPU found, use MPS instead")
77
  self.device = "mps"
78
  self.is_half = False