import json
import re
from collections import defaultdict
from functools import lru_cache
from statistics import mean
from typing import Any, Dict, Iterable, List, Optional
import pandas as pd
from datasets import Features, Value
from .dataclass import Dataclass
from .error_utils import Documentation, UnitxtError
from .operator import (
InstanceOperator,
MultiStreamOperator,
SequentialOperator,
SequentialOperatorInitializer,
StreamInitializerOperator,
)
from .operators import (
ApplyMetric,
ApplyOperatorsField,
ArtifactFetcherMixin,
FlattenInstances,
RecursiveCopy,
Rename,
)
from .register import _reset_env_local_catalogs, register_all_artifacts
from .schema import UNITXT_DATASET_SCHEMA
from .settings_utils import get_constants, get_settings
from .stream import DynamicStream, MultiStream
from .struct_data_operators import LoadJson
from .text_utils import to_pretty_string
from .type_utils import isoftype
from .utils import recursive_copy
constants = get_constants()
def nan_mean(scores):
    # NaN != NaN, so `score == score` silently drops NaN entries before averaging.
    result = mean(score for score in scores if score == score)
    try:
        return float(result)
    except (TypeError, ValueError):
        return result
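# A minimal illustration of the NaN-skipping behavior (hypothetical values):
#     nan_mean([0.5, float("nan"), 1.0])  # -> 0.75 (the NaN is ignored)
#     nan_mean([float("nan")])            # raises StatisticsError: no data left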
class FromPredictionsAndOriginalData(StreamInitializerOperator):
def zip(self, predictions, references):
for prediction, original in zip(predictions, references):
if not isoftype(original, Dict[str, Any]):
                raise Exception(
                    f"The dataset passed for evaluation is not valid. Perhaps you passed a full dataset with multiple splits for evaluation instead of only a single 'test' split. The offending instance: {original}"
                )
yield {**original, "prediction": prediction}
def process(
self, predictions: List[str], references: Iterable, split_name: str = "all"
) -> MultiStream:
return MultiStream(
{
split_name: DynamicStream(
self.zip,
gen_kwargs={"predictions": predictions, "references": references},
)
}
)
class DeleteTargetPrefix(InstanceOperator, ArtifactFetcherMixin):
def process(
self, instance: Dict[str, Any], stream_name: Optional[str] = None
) -> Dict[str, Any]:
if "metadata" in instance["task_data"]:
target_prefix = self.get_artifact(
instance["task_data"]["metadata"]["template"]
).target_prefix
if target_prefix is not None and len(target_prefix) > 0:
target_prefix = target_prefix.format(**instance["task_data"])
pattern = rf"^\s*{re.escape(target_prefix)}\s*"
instance["prediction"] = re.sub(pattern, "", instance["prediction"])
return instance
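# Example of the prefix stripping (hypothetical template): if the template's
# target_prefix is "Answer: ", a prediction "Answer: Paris" is reduced to
# "Paris" before the postprocessors run. The prefix may contain format fields,
# which are filled in from task_data before matching.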
# Shared postprocessing pipeline: keep raw copies of the prediction and
# references, strip the template's target prefix, apply the task
# postprocessors, then store processed copies alongside the raw ones.
_post_process_steps = SequentialOperator(
steps=[
RecursiveCopy(
field="prediction",
to_field="raw_prediction",
),
RecursiveCopy(
field="references",
to_field="raw_references",
dont_apply_to_streams=[constants.inference_stream],
),
RecursiveCopy(
field="source",
to_field="task_data/source",
),
DeleteTargetPrefix(),
ApplyOperatorsField(
operators_field="postprocessors",
),
RecursiveCopy(
field="prediction",
to_field="processed_prediction",
),
RecursiveCopy(
field="references",
to_field="processed_references",
dont_apply_to_streams=[constants.inference_stream],
),
]
)
@lru_cache(maxsize=None)
def group_str(json_str):
    # Canonicalizes a JSON-encoded group dict into a "k:v[,k:v...]" string.
    data = json.loads(json_str)
return ",".join(f"{k}:{v}" for k, v in data.items())
class SplitSubsetsAndGroups(MultiStreamOperator):
"""Splits a MultiStream that is small - for metrics, hence: whole stream can sit in memory, split by the value of field 'group'.
Args:
number_of_fusion_generations: int
the value in field group is of the form "sourcen/sourcenminus1/..." describing the sources in which the instance sat
when these were fused, potentially several phases of fusion. the name of the most recent source sits first in this value.
(See BaseFusion and its extensions)
subsets_depth specifies the depth of the prefix by which to split the stream.
"""
subsets_field: str = "subset"
groups_field: str = "groups"
subset_depth: Optional[int] = None
def process(self, multi_stream: MultiStream) -> MultiStream:
result = defaultdict(list)
for stream_name, stream in multi_stream.items():
for i, instance in enumerate(stream):
instance["__idx__"] = i
for field in [self.subsets_field, self.groups_field]:
if field not in instance:
raise ValueError(
f"Field {field} is missing from instance {instance}"
)
subset_stream_name = (
stream_name
+ "://"
+ "/".join(instance[self.subsets_field][: self.subset_depth])
)
result[subset_stream_name].append(instance)
for group in instance[self.groups_field]:
result[subset_stream_name + "?" + group_str(group)].append(instance)
return MultiStream.from_iterables(result, copying=True)
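# Resulting stream names follow the scheme "<origin>://<subsets-prefix>" for
# subset streams and "<origin>://<subsets-prefix>?<group>" for group streams,
# e.g. (hypothetical names) "test://mmlu/history" and "test://mmlu?group_by:risk".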
@lru_cache(maxsize=None)
def group_str_to_key_value(group_str):
keys = []
values = []
for k_v in group_str.split(","):
k, v = k_v.split(":")
if v.isdigit():
v = int(v)
keys.append(k)
values.append(v)
if len(keys) == 1:
key = keys[0]
else:
key = tuple(keys)
if len(values) == 1:
value = values[0]
else:
value = tuple(values)
return key, value
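# A round-trip sketch of the group encoding (hypothetical group dicts):
#     group_str('{"group_by": "risk"}')        # -> "group_by:risk"
#     group_str_to_key_value("group_by:risk")  # -> ("group_by", "risk")
#     group_str_to_key_value("a:1,b:2")        # -> (("a", "b"), (1, 2))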
@lru_cache(maxsize=None)
def stream_name_to_origin_subset_group(stream_name):
origin, subset_group = stream_name.split("://")
if "?" in subset_group:
subset, group = subset_group.split("?")
else:
subset, group = subset_group, None
return origin, subset, group
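# Example (hypothetical stream name):
#     stream_name_to_origin_subset_group("test://mmlu/history?group_by:risk")
#     # -> ("test", "mmlu/history", "group_by:risk")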
class JoinSubsetsAndGroups(MultiStreamOperator):
def process(self, multi_stream: MultiStream) -> MultiStream:
instances = defaultdict(dict)
global_scores = defaultdict(dict)
for stream_name, stream in multi_stream.items():
origin, subset, group = stream_name_to_origin_subset_group(stream_name)
for i, instance in enumerate(stream):
global_score = instance["score"].pop("global")
idx = instance.pop("__idx__")
if idx not in instances[origin]:
instances[origin][idx] = instance
                # the global scores of a stream are identical across its
                # instances, so they are recorded from the first instance only
if i > 0:
continue
if not group and not subset:
global_scores[origin]["global"] = global_score
else:
path = []
if subset:
path += ["subsets", *subset.split("/")]
if group:
key, value = group_str_to_key_value(group)
path += ["groups", key, value]
target = global_scores[origin]
for part in path[:-1]:
if part not in target:
target[part] = {}
target = target[part]
target[path[-1]] = global_score
        # the leaves always have score_name and score
def recursive_mean(dic):
if isinstance(dic, dict):
if "score" in dic and "score_name" in dic:
return dic
result = {}
all_scores = []
all_num_of_instances = []
for k, v in dic.items():
score = recursive_mean(v)
if score is not None:
all_scores.append(score["score"])
if "num_of_instances" in score:
all_num_of_instances.append(score["num_of_instances"])
result[k] = score
result["score"] = nan_mean(all_scores)
result["score_name"] = "subsets_mean"
if all_num_of_instances:
result["num_of_instances"] = sum(all_num_of_instances)
if result:
return result
return None
result = {}
for stream_name, stream_instances in instances.items():
score = global_scores[stream_name]
if "subsets" in score:
score["subsets"] = recursive_mean(score["subsets"])
score["global"] = {
"score": score["subsets"]["score"],
"score_name": score["subsets"]["score_name"],
"subsets_mean": score["subsets"]["score"],
}
if "num_of_instances" in score["subsets"]:
score["global"]["num_of_instances"] = score["subsets"][
"num_of_instances"
]
sorted_instances = []
for key in sorted(stream_instances.keys()):
instance = stream_instances[key]
instance["score"].update(recursive_copy(score))
sorted_instances.append(instance)
result[stream_name] = sorted_instances
return MultiStream.from_iterables(result, copying=True)
class PostProcessRecipe(SequentialOperatorInitializer):
def prepare(self):
register_all_artifacts()
self.steps = [
FromPredictionsAndOriginalData(),
LoadJson(field="task_data"),
_post_process_steps,
]
def _inference_post_process(
predictions: List[str],
references: Iterable,
split_name: str = constants.inference_stream,
):
_reset_env_local_catalogs()
register_all_artifacts()
recipe = PostProcessRecipe()
multi_stream = recipe(
predictions=predictions, references=references, split_name=split_name
)
return [instance["processed_prediction"] for instance in multi_stream[split_name]]
class MetricRecipe(SequentialOperatorInitializer):
calc_confidence_intervals: bool = True
subset_depth: int = 2
def prepare(self):
register_all_artifacts()
self.steps = [
FromPredictionsAndOriginalData(),
LoadJson(field="task_data"),
_post_process_steps,
SplitSubsetsAndGroups(
subset_depth=self.subset_depth,
),
ApplyMetric(
"metrics",
calc_confidence_intervals=self.calc_confidence_intervals,
),
JoinSubsetsAndGroups(),
Rename(
field="raw_prediction",
to_field="prediction",
),
Rename(
field="raw_references",
to_field="references",
),
RecursiveCopy(
field="source",
to_field="task_data/source",
),
]
UNITXT_METRIC_SCHEMA = Features(
{"predictions": Value("string"), "references": dict(UNITXT_DATASET_SCHEMA)}
)
class GlobalScores(dict):
"""GlobalScores is a dictionary-based class designed to handle and transform metric results into a structured format.
Args:
score (float):
The main score value.
score_name (str):
The name of the main score.
"""
@property
def score(self):
return self["score"]
@property
def score_name(self):
return self["score_name"]
def to_df(self):
"""Transforms a dictionary of results into a pandas dataframe.
Transforms a dictionary of results into a dataframe with score_name as the index,
and columns for score, ci_low, and ci_high. Handles cases where confidence intervals are missing.
Returns:
pd.DataFrame: A dataframe with the extracted information, indexed by score_name.
"""
rows = []
# Extract data based on score names
for key, value in self.items():
if key.endswith("_ci_low") or key.endswith("_ci_high"):
continue # Skip confidence interval keys for now
if isinstance(value, (int, float)): # Only consider numerical scores
score_name = key
ci_low = self.get(f"{key}_ci_low", None)
ci_high = self.get(f"{key}_ci_high", None)
rows.append(
{
"score_name": score_name,
"score": value,
"ci_low": ci_low,
"ci_high": ci_high,
}
)
df = pd.DataFrame(rows)
return df.set_index("score_name")
def __repr__(self):
return to_pretty_string(self, float_format=".2g")
@property
def summary(self):
df = self.to_df().round(2).fillna("")
df = df.sort_index()
df = df.drop("num_of_instances", axis=0)
df = df.reset_index()
score_name = self["score_name"]
num_of_instances = self["num_of_instances"]
return (
df.to_markdown(index=False)
+ f"\nMain Score: {score_name}\nNum Instances: {num_of_instances}"
)
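# A usage sketch (hypothetical results object): GlobalScores behaves like a
# plain dict with convenience accessors:
#     gs = results.global_scores
#     gs.score, gs.score_name  # main score value and its name
#     print(gs.summary)        # markdown table of all numeric scores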
class SubsetsScores(dict):
def __repr__(self):
return to_pretty_string(self, float_format=".2g")
@property
def summary(self):
        data = self
        rows = []
all_group_types = set()
def walk_subsets(node, subset_path):
# Check if this node represents a subset level by checking "score" and "score_name"
is_subset_node = "score" in node and "score_name" in node
# Extract subset-level info if this is a subset node
if is_subset_node:
subset_score = node.get("score", "")
subset_score_name = node.get("score_name", "")
subset_ci_low = node.get("score_ci_low", "")
subset_ci_high = node.get("score_ci_high", "")
subset_num_instances = node.get("num_of_instances", "")
# Check for groups at this level
groups = node.get("groups", {})
if groups:
# If there are groups, we create one row per group entry
for group_type, group_dict in groups.items():
for group_name, group_metrics in group_dict.items():
g_score = group_metrics.get("score", subset_score)
g_score_name = group_metrics.get(
"score_name", subset_score_name
)
g_ci_low = group_metrics.get("score_ci_low", subset_ci_low)
g_ci_high = group_metrics.get(
"score_ci_high", subset_ci_high
)
g_num_instances = group_metrics.get(
"num_of_instances", subset_num_instances
)
all_group_types.add(group_type)
row = {
"subset": ".".join(subset_path)
if subset_path
else "ALL",
"score": g_score,
"score_name": g_score_name,
"score_ci_low": g_ci_low,
"score_ci_high": g_ci_high,
"num_of_instances": g_num_instances,
group_type: str(group_name),
}
rows.append(row)
else:
# No groups, just one row for this subset node
row = {
"subset": ".".join(subset_path) if subset_path else "ALL",
"score": subset_score,
"score_name": subset_score_name,
"score_ci_low": subset_ci_low,
"score_ci_high": subset_ci_high,
"num_of_instances": subset_num_instances,
}
rows.append(row)
            # Recurse into nested dicts (except "groups") to find deeper
            # subsets; a child need not carry score/score_name itself, since
            # subsets can nest to arbitrary depth.
            for k, v in node.items():
                if isinstance(v, dict) and k != "groups":
                    walk_subsets(v, [*subset_path, k])
# Start recursion from top-level
walk_subsets(data, [])
# Convert to DataFrame
df = pd.DataFrame(rows)
# Ensure columns exist for all group types
for gt in all_group_types:
if gt not in df.columns:
df[gt] = ""
# Replace NaN with ""
df = df.fillna("")
# Remove columns that are all empty strings
df = df.drop(columns=[col for col in df.columns if df[col].eq("").all()])
# Attempt to order columns in a logical manner:
# subset first, then any group type columns, then score fields
fixed_cols = [
"subset",
"score",
"score_name",
"score_ci_low",
"score_ci_high",
"num_of_instances",
]
        group_type_cols = [c for c in df.columns if c not in fixed_cols]
order = [
"subset",
*group_type_cols,
"score",
"score_name",
"score_ci_low",
"score_ci_high",
"num_of_instances",
]
order = [c for c in order if c in df.columns]
df = df[order]
return df.to_markdown(index=False)
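# The expected nested shape (hypothetical): each subset node carries "score"
# and "score_name", and may hold deeper subsets and/or a "groups" dict, e.g.
#     {"mmlu": {"score": 0.7, "score_name": "accuracy",
#               "groups": {"group_by": {"risk": {"score": 0.6, ...}}}}}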
class GroupsScores(dict):
"""A dictionary subclass to store and manage group scores.
This class provides a property to summarize the scores and a custom
string representation for pretty-printing.
"""
@property
def summary(self):
"""A property to get a summary of the group scores."""
data = self
# Desired metric columns
metric_cols = [
"score",
"score_name",
"score_ci_low",
"score_ci_high",
"num_of_instances",
]
output_lines = []
for scenario_key, scenario_data in data.items():
# scenario_key could be a single string or a tuple of strings
if isinstance(scenario_key, tuple):
scenario_groups = scenario_key
else:
scenario_groups = (scenario_key,)
# Build rows for this scenario
rows = []
for group_name_key, metrics in scenario_data.items():
# group_name_key should match the structure of scenario_groups
if isinstance(group_name_key, tuple):
group_names = group_name_key
else:
group_names = (group_name_key,)
# Create a row with group columns and metric columns
row = {}
for g_type, g_name in zip(scenario_groups, group_names):
row[g_type] = str(g_name)
# Add desired metrics
for mcol in metric_cols:
row[mcol] = metrics.get(mcol, "")
rows.append(row)
# Convert this scenario's rows to a DataFrame
if rows:
df = pd.DataFrame(rows)
else:
# No rows means empty DataFrame
df = pd.DataFrame(columns=list(scenario_groups) + metric_cols)
# Fill NaN with ""
df = df.fillna("")
# Remove columns that are entirely empty
df = df.drop(columns=[col for col in df.columns if df[col].eq("").all()])
# Order columns: group types first (in the order they appear in scenario_groups), then metrics
final_cols = [col for col in scenario_groups if col in df.columns] + [
col for col in metric_cols if col in df.columns
]
df = df[final_cols]
# Title for this scenario
if len(scenario_groups) == 1:
title = f"# Group By: {scenario_groups[0]}"
else:
title = "# Group By: " + ", ".join(scenario_groups)
output_lines.append(title)
if not df.empty:
output_lines.append(df.to_markdown(index=False))
else:
output_lines.append("_No matching rows_")
output_lines.append("")
return "\n".join(output_lines)
def __repr__(self):
return to_pretty_string(self, float_format=".2g")
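# Keys are the grouping fields (a string for one field, a tuple for a
# combination), each mapping group values to score dicts, e.g. (hypothetical):
#     {"risk": {"high": {"score": 0.4, "score_name": "f1", ...},
#               "low": {"score": 0.9, "score_name": "f1", ...}}}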
class InstanceScores(list):
def __init__(self, instances):
self.original_instances = instances
instance_scores = []
for instance in instances:
instance = instance.copy()
scores = instance.pop("score")
task_data = instance.pop("task_data")
instance_scores.append(
{
**task_data,
**instance,
**scores["instance"],
}
)
super().__init__(instance_scores)
def to_df(self, flatten=True, columns=None):
"""Transforms the stored results into a pandas DataFrame.
Args:
flatten (bool, optional): Determines whether to use the flattened list of results (`self`)
or the original instances (`self.original_instances`). Defaults to True.
columns (list, optional): A list of column names to select from the resulting DataFrame.
If None, all columns are included. Defaults to None.
Returns:
pandas.DataFrame: A DataFrame containing the transformed results. If `columns` is specified,
only the specified columns are included.
Raises:
KeyError: If any specified column in `columns` does not exist in the DataFrame.
"""
        if flatten:
            df = pd.DataFrame(self)
        else:
            df = pd.DataFrame(self.original_instances)
if columns is not None:
return df[columns]
return df
@property
def summary(self):
return to_pretty_string(
self.to_df()
.head()
.drop(
columns=[
"metadata",
"media",
"data_classification_policy",
"groups",
"subset",
]
),
float_format=".2g",
)
def __repr__(self):
return to_pretty_string(self, float_format=".2g")
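# A usage sketch (hypothetical field names): each row merges task_data, the
# instance fields and the instance-level scores, so results can be inspected
# as a DataFrame:
#     results.instance_scores.to_df(columns=["source", "prediction", "score"])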
class EvaluationResults(list):
def __init__(self, *args, metadata=None, **kwargs):
super().__init__(*args, **kwargs)
self.metadata = metadata if metadata is not None else {}
@property
def global_scores(self):
return GlobalScores(self[0]["score"]["global"])
@property
def instance_scores(self) -> InstanceScores:
return InstanceScores(self)
@property
def groups_scores(self):
if "groups" not in self[0]["score"]:
            raise UnitxtError(
                "Groups scores not found. Try using group_by in the recipe.",
                additional_info_id=Documentation.EVALUATION,
)
return GroupsScores(self[0]["score"]["groups"])
@property
def subsets_scores(self):
if "subsets" not in self[0]["score"]:
            raise UnitxtError(
                "Subsets scores not found. Try using Benchmark.",
                additional_info_id=Documentation.BENCHMARKS,
)
return SubsetsScores(self[0]["score"]["subsets"])
def _compute(
predictions: List[Any],
references: Iterable,
flatten: bool = False,
split_name: str = "all",
calc_confidence_intervals: bool = True,
):
_reset_env_local_catalogs()
register_all_artifacts()
recipe = MetricRecipe(calc_confidence_intervals=calc_confidence_intervals)
multi_stream = recipe(
predictions=predictions, references=references, split_name=split_name
)
if flatten:
operator = FlattenInstances()
multi_stream = operator(multi_stream)
stream = multi_stream[split_name]
return EvaluationResults(stream)
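# A minimal end-to-end sketch (hypothetical inputs):
#     results = _compute(predictions=["positive"], references=list(test_data))
#     results.global_scores.score      # aggregated main score
#     results.instance_scores.summary  # per-instance score table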
"""
The API of a metric service:
- MetricRequest: A single input request to the metrics service.
- MetricResponse: A response returned from a metrics service.
"""
class InstanceInput(Dataclass):
"""A single instance inputted to a metric service."""
prediction: Any
references: List[Any]
additional_inputs: Optional[Dict] = None
class MetricRequest(Dataclass):
"""A request to a metrics service, includes a list of input instances."""
instance_inputs: List[InstanceInput]
class MetricResponse(Dataclass):
"""A response produced by a metrics service, includes the computed scores."""
# A list of instance score dictionaries. Each dictionary contains the
# score names and score values for a single instance.
instances_scores: List[Dict[str, Any]]
# The global scores dictionary, containing global score names and values.
# These are scores computed over the entire set of input instances, e.g.
# an average over a score computed per instance.
global_score: Dict[str, Any]
"""
Functionality for loading the remote metrics configuration from local environment variables.
"""
# A list of metrics to be executed remotely.
# For example: '["metrics.rag.context_relevance","metrics.rag.bert_k_precision"]'
# This value should be a valid json list
UNITXT_REMOTE_METRICS = "UNITXT_REMOTE_METRICS"
# The remote endpoint on which the remote metrics are available.
# For example, 'http://127.0.0.1:8000/compute'
UNITXT_REMOTE_METRICS_ENDPOINT = "UNITXT_REMOTE_METRICS_ENDPOINT"
def get_remote_metrics_names() -> List[str]:
"""Load the remote metrics names from an environment variable.
Returns:
List[str] - names of metrics to be executed remotely.
"""
settings = get_settings()
remote_metrics = settings.remote_metrics
if remote_metrics:
remote_metrics = json.loads(remote_metrics)
if not isinstance(remote_metrics, list):
raise RuntimeError(
f"Unexpected value {remote_metrics} for the '{UNITXT_REMOTE_METRICS}' environment variable. "
f"The value is expected to be a list of metric names in json format."
)
for remote_metric in remote_metrics:
if not isinstance(remote_metric, str):
raise RuntimeError(
f"Unexpected value {remote_metric} within the '{UNITXT_REMOTE_METRICS}' environment variable. "
f"The value is expected to be a string but its type is {type(remote_metric)}."
)
return remote_metrics
def get_remote_metrics_endpoint() -> str:
"""Load the remote metrics endpoint from an environment variable.
Returns:
str - The remote endpoint on which the remote metrics are available.
"""
settings = get_settings()
try:
remote_metrics_endpoint = settings.remote_metrics_endpoint
except AttributeError as e:
raise RuntimeError(
f"Unexpected None value for '{UNITXT_REMOTE_METRICS_ENDPOINT}'. "
f"Running remote metrics requires defining an "
f"endpoint in the environment variable '{UNITXT_REMOTE_METRICS_ENDPOINT}'."
) from e
return remote_metrics_endpoint
|