Gregor Betz committed
Commit c4b5a1a
1 Parent(s): a7071a7

add backends / api key secrets

Files changed (2):
  1. backend/config.py +19 -10
  2. config.yaml +2 -0
backend/config.py CHANGED

@@ -3,8 +3,14 @@ import os
 
 
 def process_config(config):
-    if "HF_TOKEN" not in os.environ:
-        raise ValueError("Please set the HF_TOKEN environment variable.")
+
+    if "CLIENT_TOKEN" not in os.environ:
+        raise ValueError("Please set the CLIENT_TOKEN environment variable.")
+    if "GUIDE_TOKEN" not in os.environ:
+        raise ValueError("Please set the GUIDE_TOKEN environment variable.")
+    if "CLASSIFIER_TOKEN" not in os.environ:
+        raise ValueError("Please set the CLASSIFIER_TOKEN environment variable.")
+
     client_kwargs = {}
     if "client_llm" in config:
         if "model_id" in config["client_llm"]:
@@ -15,8 +21,11 @@ def process_config(config):
             client_kwargs["inference_server_url"] = config["client_llm"]["url"]
         else:
             raise ValueError("config.yaml is missing client url.")
-        client_kwargs["api_key"] = os.getenv("HF_TOKEN")
-        client_kwargs["llm_backend"] = "HFChat"
+        if "backend" in config["client_llm"]:
+            client_kwargs["llm_backend"] = config["client_llm"]["backend"]
+        else:
+            raise ValueError("config.yaml is missing client backend.")
+        client_kwargs["api_key"] = os.getenv("CLIENT_TOKEN")
         client_kwargs["temperature"] = config["client_llm"].get("temperature",.6)
         client_kwargs["max_tokens"] = config["client_llm"].get("max_tokens",800)
     else:
@@ -32,8 +41,11 @@ def process_config(config):
             guide_kwargs["inference_server_url"] = config["expert_llm"]["url"]
         else:
             raise ValueError("config.yaml is missing expert url.")
-        guide_kwargs["api_key"] = os.getenv("HF_TOKEN")
-        guide_kwargs["llm_backend"] = "HFChat"
+        if "backend" in config["expert_llm"]:
+            guide_kwargs["llm_backend"] = config["expert_llm"]["backend"]
+        else:
+            raise ValueError("config.yaml is missing expert backend.")
+        guide_kwargs["api_key"] = os.getenv("GUIDE_TOKEN")
     else:
         raise ValueError("config.yaml is missing expert_llm settings.")
 
@@ -50,12 +62,9 @@ def process_config(config):
             guide_kwargs["classifier_kwargs"]["batch_size"] = int(config["classifier_llm"]["batch_size"])
         else:
             raise ValueError("config.yaml is missing classifier batch_size.")
-        guide_kwargs["classifier_kwargs"]["api_key"] = os.getenv("HF_TOKEN") # classifier api key
+        guide_kwargs["classifier_kwargs"]["api_key"] = os.getenv("CLASSIFIER_TOKEN") # classifier api key
     else:
         raise ValueError("config.yaml is missing classifier_llm settings.")
 
-    logging.info(f"client_kwargs: {client_kwargs}")
-    logging.info(f"guide_kwargs: {guide_kwargs}")
-
     return client_kwargs, guide_kwargs
 
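Taken together, the config.py change swaps the single HF_TOKEN for one secret per model role (client, expert/guide, classifier) and reads llm_backend from config.yaml instead of hard-coding "HFChat". A minimal sketch of driving the updated function, assuming config.yaml is parsed with pyyaml and backend.config is importable from the project root (this wiring is illustrative, not part of the commit):

import os

import yaml  # pyyaml; assumed here as the config.yaml parser

from backend.config import process_config

# One secret per model role now, instead of a single HF_TOKEN.
# The values below are placeholders.
os.environ.setdefault("CLIENT_TOKEN", "hf_placeholder_client")
os.environ.setdefault("GUIDE_TOKEN", "hf_placeholder_guide")
os.environ.setdefault("CLASSIFIER_TOKEN", "hf_placeholder_classifier")

with open("config.yaml") as f:
    config = yaml.safe_load(f)

# Raises ValueError if a token or a required config key (url, backend, ...) is missing.
client_kwargs, guide_kwargs = process_config(config)
print(client_kwargs["llm_backend"])  # "HFChat", now read from config.yaml
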
config.yaml CHANGED

@@ -1,11 +1,13 @@
 client_llm:
   url: "" # <-- start your own inference endpoint and provide url here (or use https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta)
   model_id: "HuggingFaceH4/zephyr-7b-beta" # <-- your client llm
+  backend: HFChat
   max_tokens: 800
   temperature: 0.6
 expert_llm:
   url: "" # <-- start your own inference endpoint and provide url here (or use https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-Instruct)
   model_id: "meta-llama/Meta-Llama-3-70B-Instruct"
+  backend: HFChat
 classifier_llm:
   model_id: "MoritzLaurer/DeBERTa-v3-base-mnli-fever-anli"
   url: "" # <-- start your own inference endpoint of classifier model and provide url here