saicharan2804 committed on
Commit
cad7bde
·
1 Parent(s): 2a3cfbf

Removed some metrics

Browse files
Files changed (1) hide show
  1. molgenevalmetric.py +1 -26
molgenevalmetric.py CHANGED
@@ -81,30 +81,10 @@ class molgenevalmetric(evaluate.Metric):
81
 
82
  Results = metrics.get_all_metrics(gen = generated_smiles, train= train_smiles)
83
 
84
- # evaluator = Evaluator(name = 'Diversity')
85
- # Diversity = evaluator(generated_smiles)
86
-
87
- # Results = {}
88
-
89
  evaluator = Evaluator(name = 'KL_Divergence')
90
  KL_Divergence = evaluator(generated_smiles, train_smiles)
91
-
92
- # evaluator = Evaluator(name = 'FCD_Distance')
93
- # FCD_Distance = evaluator(generated_smiles, train_smiles)
94
-
95
- # evaluator = Evaluator(name = 'Novelty')
96
- # Novelty = evaluator(generated_smiles, train_smiles)
97
-
98
- # evaluator = Evaluator(name = 'Validity')
99
- # Validity = evaluator(generated_smiles)
100
-
101
-
102
  Results.update({
103
- # "PyTDC_Diversity": Diversity,
104
  "KL_Divergence": KL_Divergence,
105
- # "PyTDC_FCD_Distance": FCD_Distance,
106
- # "PyTDC_Novelty": Novelty,
107
- # "PyTDC_Validity": Validity,
108
  })
109
 
110
 
@@ -114,26 +94,21 @@ class molgenevalmetric(evaluate.Metric):
114
  'Median', 'Isomers', 'Valsartan_SMARTS', 'Hop'
115
  ]
116
 
117
- # Iterate through each oracle and compute its score
118
  for oracle_name in oracle_list:
119
  oracle = Oracle(name=oracle_name)
120
  if oracle_name in ['Rediscovery', 'MPO', 'Similarity', 'Median', 'Isomers', 'Hop']:
121
- # Assuming these oracles return a dictionary where values are lists of scores
122
  score = oracle(generated_smiles)
123
  if isinstance(score, dict):
124
- # Convert lists of scores to average score for these specific metrics
125
  score = {key: sum(values)/len(values) for key, values in score.items()}
126
  else:
127
- # Assuming other oracles return a list of scores
128
  score = oracle(generated_smiles)
129
  if isinstance(score, list):
130
- # Convert list of scores to average score
131
  score = sum(score) / len(score)
132
 
133
  Results.update({f"{oracle_name}": score})
134
 
135
  keys_to_remove = ["FCD/TestSF", "SNN/TestSF", "Frag/TestSF", "Scaf/TestSF"]
136
  for key in keys_to_remove:
137
- result['results'].pop(key, None)
138
 
139
  return {"results": Results}
 
81
 
82
  Results = metrics.get_all_metrics(gen = generated_smiles, train= train_smiles)
83
 
 
 
 
 
 
84
  evaluator = Evaluator(name = 'KL_Divergence')
85
  KL_Divergence = evaluator(generated_smiles, train_smiles)
 
 
 
 
 
 
 
 
 
 
 
86
  Results.update({
 
87
  "KL_Divergence": KL_Divergence,
 
 
 
88
  })
89
 
90
 
 
94
  'Median', 'Isomers', 'Valsartan_SMARTS', 'Hop'
95
  ]
96
 
 
97
  for oracle_name in oracle_list:
98
  oracle = Oracle(name=oracle_name)
99
  if oracle_name in ['Rediscovery', 'MPO', 'Similarity', 'Median', 'Isomers', 'Hop']:
 
100
  score = oracle(generated_smiles)
101
  if isinstance(score, dict):
 
102
  score = {key: sum(values)/len(values) for key, values in score.items()}
103
  else:
 
104
  score = oracle(generated_smiles)
105
  if isinstance(score, list):
 
106
  score = sum(score) / len(score)
107
 
108
  Results.update({f"{oracle_name}": score})
109
 
110
  keys_to_remove = ["FCD/TestSF", "SNN/TestSF", "Frag/TestSF", "Scaf/TestSF"]
111
  for key in keys_to_remove:
112
+ Results.pop(key, None)
113
 
114
  return {"results": Results}