diff --git "a/repoeval/func_level/test.jsonl" "b/repoeval/func_level/test.jsonl" new file mode 100644--- /dev/null +++ "b/repoeval/func_level/test.jsonl" @@ -0,0 +1,455 @@ +{"prompt": "import random\nimport sys\nfrom abc import abstractmethod, abstractstaticmethod\nfrom typing import Any, Callable, Dict, Iterable\n\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom trlx.data import GeneralElement, RLElement\n\n# specifies a dictionary of architectures\n_DATAPIPELINE: Dict[str, any] = {} # registry\n\n\ndef register_datapipeline(name):\n \"\"\"Decorator used register a CARP architecture\n Args:\n name: Name of the architecture\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/0", "ground_truth": " def register_class(cls, name):\n _DATAPIPELINE[name] = cls\n setattr(sys.modules[__name__], name, cls)\n return cls\n\n if isinstance(name, str):\n name = name.lower()\n return lambda c: register_class(c, name)\n\n cls = name\n name = cls.__name__\n register_class(cls, name.lower())\n\n return cls\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "pipeline", "__init__.py"], "context_start_lineno": 0, "lineno": 19, "function_name": "register_datapipeline"}, "groundtruth": " def register_class(cls, name):\n _DATAPIPELINE[name] = cls\n setattr(sys.modules[__name__], name, cls)\n return cls\n\n if isinstance(name, str):\n name = name.lower()\n return lambda c: register_class(c, name)\n\n cls = name\n name = cls.__name__\n register_class(cls, name.lower())\n\n return cls\n"} +{"prompt": "import random\nimport sys\nfrom abc import abstractmethod, abstractstaticmethod\nfrom typing import Any, Callable, Dict, Iterable\n\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom trlx.data import GeneralElement, RLElement\n\n# specifies a dictionary of architectures\n_DATAPIPELINE: Dict[str, any] = {} # registry\n\n\ndef register_datapipeline(name):\n \"\"\"Decorator used register a CARP architecture\n Args:\n name: Name of the architecture\n \"\"\"\n\n def register_class(cls, name):", "metadata": {"task_id": "CarperAI--trlx/1", "ground_truth": " _DATAPIPELINE[name] = cls\n setattr(sys.modules[__name__], name, cls)\n return cls\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "pipeline", "__init__.py"], "context_start_lineno": 0, "lineno": 20, "function_name": "register_class"}, "groundtruth": " _DATAPIPELINE[name] = cls\n setattr(sys.modules[__name__], name, cls)\n return cls\n"} +{"prompt": "from dataclasses import dataclass, field\nfrom typing import Any, Dict, Optional, Set\n\nimport yaml\n\nfrom trlx.data.method_configs import MethodConfig, get_method\n\n\ndef merge(base: Dict, update: Dict, updated: Set) -> Dict:\n \"Recursively updates a nested dictionary with new values\"\n for k, v in base.items():\n if k in update and isinstance(v, dict):\n base[k] = merge(v, update[k], updated)\n updated.add(k)\n elif k in update:\n base[k] = update[k]\n updated.add(k)\n\n return base\n\n\n@dataclass\nclass ModelConfig:\n \"\"\"\n Config for a model.\n\n :param model_path: Path or name of the model (local or on huggingface hub)\n :type model_path: str\n\n :param model_arch_type: Type of model architecture. Either \"causal\" or \"seq2seq\"\n :type model_arch_type: str\n\n :param num_layers_unfrozen: Number of layers to unfreeze for fine-tuning.\n -1 means all layers are unfrozen.\n :type num_layers_unfrozen: int\n\n :param delta_kwargs: Keyword arguments for instantiating OpenDelta models for delta-tuning.\n Follow the `OpenDelta.AutoDeltaConfig` specification, e.g. 
for LoRA style tuning, set\n the `delta_type` to `lora` and include the model specific hyper-parameters (e.g. `lora_r`)\n {\"delta_type\": \"lora\", \"modified_modules\": \"all\", \"lora_r\": 8, \"lora_alpha\": 16, \"lora_dropout\": 0.0}\n or in YAML format:\n delta_kwargs:\n delta_type: lora\n modified_modules: \"all\"\n lora_r: 8\n lora_alpha: 16\n lora_dropout: 0.0\n See: https://opendelta.readthedocs.io/en/latest/modules/auto_delta.html#opendelta.auto_delta.AutoDeltaConfig\n :type delta_kwargs: Optional[Dict[str, Any]]\n \"\"\"\n\n model_path: str\n model_arch_type: str = \"causal\"\n num_layers_unfrozen: int = -1\n delta_kwargs: Optional[Dict[str, Any]] = None\n\n @classmethod\n def from_dict(cls, config: Dict[str, Any]):\n return cls(**config)\n\n\n@dataclass\nclass TokenizerConfig:\n \"\"\"\n Config for a model.\n\n :param tokenizer_path: Path or name of the tokenizer (local or on huggingface hub)\n :type tokenizer_path: str\n\n :param padding_side: Padding side\n :type padding_path: str\n\n :param truncation_side: Truncation side\n :type truncation_side: str\n \"\"\"\n\n tokenizer_path: str\n padding_side: str = \"left\"\n truncation_side: str = \"right\"\n\n @classmethod\n def from_dict(cls, config: Dict[str, Any]):\n return cls(**config)\n\n\n@dataclass\nclass OptimizerConfig:\n \"\"\"\n Config for an optimizer.\n\n :param name: Name of the optimizer\n :type name: str\n\n :param kwargs: Keyword arguments for the optimizer (e.g. lr, betas, eps, weight_decay)\n :type kwargs: Dict[str, Any]\n \"\"\"\n\n name: str\n kwargs: Dict[str, Any] = field(default_factory=dict)\n\n @classmethod\n def from_dict(cls, config: Dict[str, Any]):\n return cls(**config)\n\n\n@dataclass\nclass SchedulerConfig:\n \"\"\"\n Config for a learning rate scheduler.\n\n :param name: Name of the scheduler\n :type name: str\n\n :param kwargs: Keyword arguments for the scheduler instance (e.g. warmup_steps, T_max)\n :type kwargs: Dict[str, Any]\n \"\"\"\n\n name: str\n kwargs: Dict[str, Any] = field(default_factory=dict)\n\n @classmethod\n def from_dict(cls, config: Dict[str, Any]):\n return cls(**config)\n\n\n@dataclass\nclass TrainConfig:\n \"\"\"\n Config for train job on model.\n\n :param total_steps: Total number of training steps\n :type total_steps: int\n\n :param seq_length: Number of tokens to use as context (max length for tokenizer)\n :type seq_length: int\n\n :param epochs: Total number of passes through data\n :type epochs: int\n\n :param batch_size: Batch size for training\n :type batch_size: int\n\n :param tracker: Tracker to use for logging. Default: \"wandb\"\n :type tracker: str\n\n :param checkpoint_interval: Save model every checkpoint_interval steps\n :type checkpoint_interval: int\n\n :param eval_interval: Evaluate model every eval_interval steps\n :type eval_interval: int\n\n :param pipeline: Pipeline to use for training. One of the registered pipelines present in trlx.pipeline\n :type pipeline: str\n\n :param trainer: Trainer to use for training. 
One of the registered trainers present in trlx.trainer\n :type trainer: str\n\n :param trainer_kwargs: Extra keyword arguments for the trainer\n :type trainer: Dict[str, Any]\n\n :param project_name: Project name for wandb\n :type project_name: str\n\n :param entity_name: Entity name for wandb\n :type entity_name: str\n\n :param group_name: Group name for wandb (used for grouping runs)\n :type group_name: str\n\n :param checkpoint_dir: Directory to save checkpoints\n :type checkpoint_dir: str\n\n :param rollout_logging_dir: Directory to store generated rollouts for use in Algorithm Distillation.\n Only used by AcceleratePPOTrainer.\n :type rollout_logging_dir: Optional[str]\n\n :param save_best: Save best model based on mean reward\n :type save_best: bool\n\n :param seed: Random seed\n :type seed: int\n \"\"\"\n\n total_steps: int\n seq_length: int\n epochs: int\n batch_size: int\n\n checkpoint_interval: int\n eval_interval: int\n\n pipeline: str # One of the pipelines in framework.pipeline\n trainer: str # One of the trainers\n trainer_kwargs: Dict[str, Any] = field(default_factory=dict) # Extra keyword arguments for the trainer\n\n project_name: str = \"trlx\"\n entity_name: Optional[str] = None\n group_name: Optional[str] = None\n\n checkpoint_dir: str = \"ckpts\"\n rollout_logging_dir: Optional[str] = None\n save_best: bool = True\n\n tracker: Optional[str] = \"wandb\"\n logging_dir: Optional[str] = None\n\n seed: int = 1000\n\n @classmethod\n def from_dict(cls, config: Dict[str, Any]):\n return cls(**config)\n\n\n@dataclass\nclass TRLConfig:\n \"\"\"\n Top level config for trlX. Loads configs and can be converted to dictionary.\n \"\"\"\n\n method: MethodConfig\n model: ModelConfig\n optimizer: OptimizerConfig\n scheduler: SchedulerConfig\n tokenizer: TokenizerConfig\n train: TrainConfig\n\n @classmethod\n def load_yaml(cls, yml_fp: str):\n \"\"\"\n Load yaml file as TRLConfig.\n\n :param yml_fp: Path to yaml file\n :type yml_fp: str\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/2", "ground_truth": " with open(yml_fp, mode=\"r\") as file:\n config = yaml.safe_load(file)\n return cls.from_dict(config)\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "data", "configs.py"], "context_start_lineno": 0, "lineno": 234, "function_name": "load_yaml"}, "groundtruth": " with open(yml_fp, mode=\"r\") as file:\n config = yaml.safe_load(file)\n return cls.from_dict(config)\n"} +{"prompt": "from dataclasses import dataclass, field\nfrom typing import Any, Dict, Optional, Set\n\nimport yaml\n\nfrom trlx.data.method_configs import MethodConfig, get_method\n\n\ndef merge(base: Dict, update: Dict, updated: Set) -> Dict:\n \"Recursively updates a nested dictionary with new values\"\n for k, v in base.items():\n if k in update and isinstance(v, dict):\n base[k] = merge(v, update[k], updated)\n updated.add(k)\n elif k in update:\n base[k] = update[k]\n updated.add(k)\n\n return base\n\n\n@dataclass\nclass ModelConfig:\n \"\"\"\n Config for a model.\n\n :param model_path: Path or name of the model (local or on huggingface hub)\n :type model_path: str\n\n :param model_arch_type: Type of model architecture. Either \"causal\" or \"seq2seq\"\n :type model_arch_type: str\n\n :param num_layers_unfrozen: Number of layers to unfreeze for fine-tuning.\n -1 means all layers are unfrozen.\n :type num_layers_unfrozen: int\n\n :param delta_kwargs: Keyword arguments for instantiating OpenDelta models for delta-tuning.\n Follow the `OpenDelta.AutoDeltaConfig` specification, e.g. 
for LoRA style tuning, set\n the `delta_type` to `lora` and include the model specific hyper-parameters (e.g. `lora_r`)\n {\"delta_type\": \"lora\", \"modified_modules\": \"all\", \"lora_r\": 8, \"lora_alpha\": 16, \"lora_dropout\": 0.0}\n or in YAML format:\n delta_kwargs:\n delta_type: lora\n modified_modules: \"all\"\n lora_r: 8\n lora_alpha: 16\n lora_dropout: 0.0\n See: https://opendelta.readthedocs.io/en/latest/modules/auto_delta.html#opendelta.auto_delta.AutoDeltaConfig\n :type delta_kwargs: Optional[Dict[str, Any]]\n \"\"\"\n\n model_path: str\n model_arch_type: str = \"causal\"\n num_layers_unfrozen: int = -1\n delta_kwargs: Optional[Dict[str, Any]] = None\n\n @classmethod\n def from_dict(cls, config: Dict[str, Any]):\n return cls(**config)\n\n\n@dataclass\nclass TokenizerConfig:\n \"\"\"\n Config for a model.\n\n :param tokenizer_path: Path or name of the tokenizer (local or on huggingface hub)\n :type tokenizer_path: str\n\n :param padding_side: Padding side\n :type padding_path: str\n\n :param truncation_side: Truncation side\n :type truncation_side: str\n \"\"\"\n\n tokenizer_path: str\n padding_side: str = \"left\"\n truncation_side: str = \"right\"\n\n @classmethod\n def from_dict(cls, config: Dict[str, Any]):\n return cls(**config)\n\n\n@dataclass\nclass OptimizerConfig:\n \"\"\"\n Config for an optimizer.\n\n :param name: Name of the optimizer\n :type name: str\n\n :param kwargs: Keyword arguments for the optimizer (e.g. lr, betas, eps, weight_decay)\n :type kwargs: Dict[str, Any]\n \"\"\"\n\n name: str\n kwargs: Dict[str, Any] = field(default_factory=dict)\n\n @classmethod\n def from_dict(cls, config: Dict[str, Any]):\n return cls(**config)\n\n\n@dataclass\nclass SchedulerConfig:\n \"\"\"\n Config for a learning rate scheduler.\n\n :param name: Name of the scheduler\n :type name: str\n\n :param kwargs: Keyword arguments for the scheduler instance (e.g. warmup_steps, T_max)\n :type kwargs: Dict[str, Any]\n \"\"\"\n\n name: str\n kwargs: Dict[str, Any] = field(default_factory=dict)\n\n @classmethod\n def from_dict(cls, config: Dict[str, Any]):\n return cls(**config)\n\n\n@dataclass\nclass TrainConfig:\n \"\"\"\n Config for train job on model.\n\n :param total_steps: Total number of training steps\n :type total_steps: int\n\n :param seq_length: Number of tokens to use as context (max length for tokenizer)\n :type seq_length: int\n\n :param epochs: Total number of passes through data\n :type epochs: int\n\n :param batch_size: Batch size for training\n :type batch_size: int\n\n :param tracker: Tracker to use for logging. Default: \"wandb\"\n :type tracker: str\n\n :param checkpoint_interval: Save model every checkpoint_interval steps\n :type checkpoint_interval: int\n\n :param eval_interval: Evaluate model every eval_interval steps\n :type eval_interval: int\n\n :param pipeline: Pipeline to use for training. One of the registered pipelines present in trlx.pipeline\n :type pipeline: str\n\n :param trainer: Trainer to use for training. 
One of the registered trainers present in trlx.trainer\n :type trainer: str\n\n :param trainer_kwargs: Extra keyword arguments for the trainer\n :type trainer: Dict[str, Any]\n\n :param project_name: Project name for wandb\n :type project_name: str\n\n :param entity_name: Entity name for wandb\n :type entity_name: str\n\n :param group_name: Group name for wandb (used for grouping runs)\n :type group_name: str\n\n :param checkpoint_dir: Directory to save checkpoints\n :type checkpoint_dir: str\n\n :param rollout_logging_dir: Directory to store generated rollouts for use in Algorithm Distillation.\n Only used by AcceleratePPOTrainer.\n :type rollout_logging_dir: Optional[str]\n\n :param save_best: Save best model based on mean reward\n :type save_best: bool\n\n :param seed: Random seed\n :type seed: int\n \"\"\"\n\n total_steps: int\n seq_length: int\n epochs: int\n batch_size: int\n\n checkpoint_interval: int\n eval_interval: int\n\n pipeline: str # One of the pipelines in framework.pipeline\n trainer: str # One of the trainers\n trainer_kwargs: Dict[str, Any] = field(default_factory=dict) # Extra keyword arguments for the trainer\n\n project_name: str = \"trlx\"\n entity_name: Optional[str] = None\n group_name: Optional[str] = None\n\n checkpoint_dir: str = \"ckpts\"\n rollout_logging_dir: Optional[str] = None\n save_best: bool = True\n\n tracker: Optional[str] = \"wandb\"\n logging_dir: Optional[str] = None\n\n seed: int = 1000\n\n @classmethod\n def from_dict(cls, config: Dict[str, Any]):\n return cls(**config)\n\n\n@dataclass\nclass TRLConfig:\n \"\"\"\n Top level config for trlX. Loads configs and can be converted to dictionary.\n \"\"\"\n\n method: MethodConfig\n model: ModelConfig\n optimizer: OptimizerConfig\n scheduler: SchedulerConfig\n tokenizer: TokenizerConfig\n train: TrainConfig\n\n @classmethod\n def load_yaml(cls, yml_fp: str):\n \"\"\"\n Load yaml file as TRLConfig.\n\n :param yml_fp: Path to yaml file\n :type yml_fp: str\n \"\"\"\n with open(yml_fp, mode=\"r\") as file:\n config = yaml.safe_load(file)\n return cls.from_dict(config)\n\n def to_dict(self):\n \"\"\"\n Convert TRLConfig to dictionary.\n \"\"\"\n data = {\n \"method\": self.method.__dict__,\n \"model\": self.model.__dict__,\n \"optimizer\": self.optimizer.__dict__,\n \"scheduler\": self.scheduler.__dict__,\n \"tokenizer\": self.tokenizer.__dict__,\n \"train\": self.train.__dict__,\n }\n\n return data\n\n @classmethod\n def from_dict(cls, config: Dict):\n \"\"\"\n Convert dictionary to TRLConfig.\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/3", "ground_truth": " return cls(\n method=get_method(config[\"method\"][\"name\"]).from_dict(config[\"method\"]),\n model=ModelConfig.from_dict(config[\"model\"]),\n tokenizer=TokenizerConfig.from_dict(config[\"tokenizer\"]),\n optimizer=OptimizerConfig.from_dict(config[\"optimizer\"]),\n scheduler=SchedulerConfig.from_dict(config[\"scheduler\"]),\n train=TrainConfig.from_dict(config[\"train\"]),\n )\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "data", "configs.py"], "context_start_lineno": 0, "lineno": 258, "function_name": "from_dict"}, "groundtruth": " return cls(\n method=get_method(config[\"method\"][\"name\"]).from_dict(config[\"method\"]),\n model=ModelConfig.from_dict(config[\"model\"]),\n tokenizer=TokenizerConfig.from_dict(config[\"tokenizer\"]),\n optimizer=OptimizerConfig.from_dict(config[\"optimizer\"]),\n scheduler=SchedulerConfig.from_dict(config[\"scheduler\"]),\n train=TrainConfig.from_dict(config[\"train\"]),\n )\n"} +{"prompt": "import sys\nfrom 
dataclasses import dataclass\nfrom typing import Any, Dict\n\n# specifies a dictionary of method configs\n_METHODS: Dict[str, Any] = {} # registry\n\n\ndef register_method(name):\n \"\"\"Decorator used register a method config\n Args:\n name: Name of the method\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/4", "ground_truth": " def register_class(cls, name):\n _METHODS[name] = cls\n setattr(sys.modules[__name__], name, cls)\n return cls\n\n if isinstance(name, str):\n name = name.lower()\n return lambda c: register_class(c, name)\n\n cls = name\n name = cls.__name__\n register_class(cls, name.lower())\n\n return cls\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "data", "method_configs.py"], "context_start_lineno": 0, "lineno": 14, "function_name": "register_method"}, "groundtruth": " def register_class(cls, name):\n _METHODS[name] = cls\n setattr(sys.modules[__name__], name, cls)\n return cls\n\n if isinstance(name, str):\n name = name.lower()\n return lambda c: register_class(c, name)\n\n cls = name\n name = cls.__name__\n register_class(cls, name.lower())\n\n return cls\n"} +{"prompt": "import sys\nfrom dataclasses import dataclass\nfrom typing import Any, Dict\n\n# specifies a dictionary of method configs\n_METHODS: Dict[str, Any] = {} # registry\n\n\ndef register_method(name):\n \"\"\"Decorator used register a method config\n Args:\n name: Name of the method\n \"\"\"\n\n def register_class(cls, name):", "metadata": {"task_id": "CarperAI--trlx/5", "ground_truth": " _METHODS[name] = cls\n setattr(sys.modules[__name__], name, cls)\n return cls\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "data", "method_configs.py"], "context_start_lineno": 0, "lineno": 15, "function_name": "register_class"}, "groundtruth": " _METHODS[name] = cls\n setattr(sys.modules[__name__], name, cls)\n return cls\n"} +{"prompt": "import sys\nfrom dataclasses import dataclass\nfrom typing import Any, Dict\n\n# specifies a dictionary of method configs\n_METHODS: Dict[str, Any] = {} # registry\n\n\ndef register_method(name):\n \"\"\"Decorator used register a method config\n Args:\n name: Name of the method\n \"\"\"\n\n def register_class(cls, name):\n _METHODS[name] = cls\n setattr(sys.modules[__name__], name, cls)\n return cls\n\n if isinstance(name, str):\n name = name.lower()\n return lambda c: register_class(c, name)\n\n cls = name\n name = cls.__name__\n register_class(cls, name.lower())\n\n return cls\n\n\n@dataclass\n@register_method\nclass MethodConfig:\n \"\"\"\n Config for a certain RL method.\n\n :param name: Name of the method\n :type name: str\n \"\"\"\n\n name: str\n\n @classmethod\n def from_dict(cls, config: Dict[str, Any]):\n return cls(**config)\n\n\ndef get_method(name: str) -> MethodConfig:\n \"\"\"\n Return constructor for specified method config\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/6", "ground_truth": " name = name.lower()\n if name in _METHODS:\n return _METHODS[name]\n else:\n raise Exception(\"Error: Trying to access a method that has not been registered\")\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "data", "method_configs.py"], "context_start_lineno": 0, "lineno": 51, "function_name": "get_method"}, "groundtruth": " name = name.lower()\n if name in _METHODS:\n return _METHODS[name]\n else:\n raise Exception(\"Error: Trying to access a method that has not been registered\")\n"} +{"prompt": "import sys\nfrom abc import abstractmethod\nfrom typing import Any, Callable, Dict, Iterable\n\nfrom trlx.data.configs import TRLConfig\nfrom trlx.pipeline import BaseRolloutStore\n\n# 
specifies a dictionary of architectures\n_TRAINERS: Dict[str, Any] = {} # registry\n\n\ndef register_trainer(name):\n \"\"\"Decorator used to register a trainer\n Args:\n name: Name of the trainer type to register\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/7", "ground_truth": " def register_class(cls, name):\n _TRAINERS[name] = cls\n setattr(sys.modules[__name__], name, cls)\n return cls\n\n if isinstance(name, str):\n name = name.lower()\n return lambda c: register_class(c, name)\n\n cls = name\n name = cls.__name__\n register_class(cls, name.lower())\n\n return cls\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "trainer", "__init__.py"], "context_start_lineno": 0, "lineno": 17, "function_name": "register_trainer"}, "groundtruth": " def register_class(cls, name):\n _TRAINERS[name] = cls\n setattr(sys.modules[__name__], name, cls)\n return cls\n\n if isinstance(name, str):\n name = name.lower()\n return lambda c: register_class(c, name)\n\n cls = name\n name = cls.__name__\n register_class(cls, name.lower())\n\n return cls\n"} +{"prompt": "import sys\nfrom abc import abstractmethod\nfrom typing import Any, Callable, Dict, Iterable\n\nfrom trlx.data.configs import TRLConfig\nfrom trlx.pipeline import BaseRolloutStore\n\n# specifies a dictionary of architectures\n_TRAINERS: Dict[str, Any] = {} # registry\n\n\ndef register_trainer(name):\n \"\"\"Decorator used to register a trainer\n Args:\n name: Name of the trainer type to register\n \"\"\"\n\n def register_class(cls, name):", "metadata": {"task_id": "CarperAI--trlx/8", "ground_truth": " _TRAINERS[name] = cls\n setattr(sys.modules[__name__], name, cls)\n return cls\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "trainer", "__init__.py"], "context_start_lineno": 0, "lineno": 18, "function_name": "register_class"}, "groundtruth": " _TRAINERS[name] = cls\n setattr(sys.modules[__name__], name, cls)\n return cls\n"} +{"prompt": "from typing import Callable\n\n# Register load pipelines via module import\nfrom trlx.pipeline import _DATAPIPELINE\nfrom trlx.pipeline.offline_pipeline import PromptPipeline\n\n# Register load trainers via module import\nfrom trlx.trainer import _TRAINERS, register_trainer\nfrom trlx.trainer.accelerate_ilql_trainer import AccelerateILQLTrainer\nfrom trlx.trainer.accelerate_ppo_trainer import AcceleratePPOTrainer\nfrom trlx.trainer.accelerate_sft_trainer import AccelerateSFTTrainer\n\ntry:\n from trlx.trainer.nemo_ilql_trainer import NeMoILQLTrainer\nexcept ImportError:\n # NeMo is not installed\n def _trainer_unavailble(name):", "metadata": {"task_id": "CarperAI--trlx/9", "ground_truth": " def log_error(*args, **kwargs):\n raise ImportError(f\"Unable to import NeMo so {name} is unavailable\")\n\n return register_trainer(name)(log_error)\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "loading.py"], "context_start_lineno": 0, "lineno": 17, "function_name": "_trainer_unavailble"}, "groundtruth": " def log_error(*args, **kwargs):\n raise ImportError(f\"Unable to import NeMo so {name} is unavailable\")\n\n return register_trainer(name)(log_error)\n"} +{"prompt": "# Copyright 2023 Optuna, Hugging Face, CarperAI\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Logging utilities.\"\"\"\n\nimport logging\nimport os\nimport sys\nimport threading\nfrom logging import CRITICAL # NOQA\nfrom logging import DEBUG # NOQA\nfrom logging import ERROR # NOQA\nfrom logging import FATAL # NOQA\nfrom logging import INFO # NOQA\nfrom logging import NOTSET # NOQA\nfrom logging import WARN # NOQA\nfrom logging import WARNING # NOQA\nfrom typing import Optional\n\nimport torch\nfrom tqdm import auto as tqdm_lib\n\n_lock = threading.Lock()\n_default_handler: Optional[logging.Handler] = None\n\nlog_levels = {\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n}\n\n_default_log_level = logging.INFO\n\n\ndef _get_default_logging_level():\n \"\"\"\n If `TRLX_VERBOSITY` env var is set to one of the valid choices, return that as the new default level. If it is\n not - fall back to `_default_log_level`\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/10", "ground_truth": " env_level_str = os.getenv(\"TRLX_VERBOSITY\", None)\n if env_level_str:\n if env_level_str.lower() in log_levels:\n return log_levels[env_level_str.lower()]\n else:\n logging.getLogger().warning(\n f\"Unknown option TRLX_VERBOSITY={env_level_str}, \" f\"has to be one of: { ', '.join(log_levels.keys()) }\"\n )\n return _default_log_level\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "logging.py"], "context_start_lineno": 0, "lineno": 51, "function_name": "_get_default_logging_level"}, "groundtruth": " env_level_str = os.getenv(\"TRLX_VERBOSITY\", None)\n if env_level_str:\n if env_level_str.lower() in log_levels:\n return log_levels[env_level_str.lower()]\n else:\n logging.getLogger().warning(\n f\"Unknown option TRLX_VERBOSITY={env_level_str}, \" f\"has to be one of: { ', '.join(log_levels.keys()) }\"\n )\n return _default_log_level\n"} +{"prompt": "# Copyright 2023 Optuna, Hugging Face, CarperAI\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Logging utilities.\"\"\"\n\nimport logging\nimport os\nimport sys\nimport threading\nfrom logging import CRITICAL # NOQA\nfrom logging import DEBUG # NOQA\nfrom logging import ERROR # NOQA\nfrom logging import FATAL # NOQA\nfrom logging import INFO # NOQA\nfrom logging import NOTSET # NOQA\nfrom logging import WARN # NOQA\nfrom logging import WARNING # NOQA\nfrom typing import Optional\n\nimport torch\nfrom tqdm import auto as tqdm_lib\n\n_lock = threading.Lock()\n_default_handler: Optional[logging.Handler] = None\n\nlog_levels = {\n \"debug\": logging.DEBUG,\n \"info\": logging.INFO,\n \"warning\": logging.WARNING,\n \"error\": logging.ERROR,\n \"critical\": logging.CRITICAL,\n}\n\n_default_log_level = logging.INFO\n\n\ndef _get_default_logging_level():\n \"\"\"\n If `TRLX_VERBOSITY` env var is set to one of the valid choices, return that as the new default level. 
If it is\n not - fall back to `_default_log_level`\n \"\"\"\n env_level_str = os.getenv(\"TRLX_VERBOSITY\", None)\n if env_level_str:\n if env_level_str.lower() in log_levels:\n return log_levels[env_level_str.lower()]\n else:\n logging.getLogger().warning(\n f\"Unknown option TRLX_VERBOSITY={env_level_str}, \" f\"has to be one of: { ', '.join(log_levels.keys()) }\"\n )\n return _default_log_level\n\n\ndef _get_library_name() -> str:\n return __name__.split(\".\")[0]\n\n\ndef _get_library_root_logger() -> logging.Logger:\n return logging.getLogger(_get_library_name())\n\n\ndef _configure_library_root_logger() -> None:", "metadata": {"task_id": "CarperAI--trlx/11", "ground_truth": " global _default_handler\n\n with _lock:\n if _default_handler:\n # This library has already configured the library root logger.\n return\n _default_handler = logging.StreamHandler() # Set sys.stderr as stream.\n _default_handler.flush = sys.stderr.flush\n\n # Apply our default configuration to the library root logger.\n library_root_logger = _get_library_root_logger()\n library_root_logger.addHandler(_default_handler)\n library_root_logger.setLevel(_get_default_logging_level())\n library_root_logger.propagate = False\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "logging.py"], "context_start_lineno": 0, "lineno": 71, "function_name": "_configure_library_root_logger"}, "groundtruth": " global _default_handler\n\n with _lock:\n if _default_handler:\n # This library has already configured the library root logger.\n return\n _default_handler = logging.StreamHandler() # Set sys.stderr as stream.\n _default_handler.flush = sys.stderr.flush\n\n # Apply our default configuration to the library root logger.\n library_root_logger = _get_library_root_logger()\n library_root_logger.addHandler(_default_handler)\n library_root_logger.setLevel(_get_default_logging_level())\n library_root_logger.propagate = False\n"} +{"prompt": "import math\nimport os\nimport random\nimport subprocess\nimport time\nfrom dataclasses import is_dataclass\nfrom enum import Enum\nfrom numbers import Number\nfrom typing import Any, Dict, Tuple\n\nimport numpy as np\nimport torch\nfrom accelerate import Accelerator\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, LinearLR\n\n\ndef print_rank_0(*message):\n \"\"\"\n Print only once from the main rank\n \"\"\"\n if os.environ.get(\"RANK\", \"0\") == \"0\":\n print(*message)\n\n\ndef significant(x: Number, ndigits=2) -> Number:\n \"\"\"\n Cut the number up to its `ndigits` after the most significant\n \"\"\"\n if isinstance(x, torch.Tensor):\n x = x.item()\n\n if not isinstance(x, Number) or math.isnan(x) or x == 0:\n return x\n\n return round(x, ndigits - int(math.floor(math.log10(abs(x)))))\n\n\ndef set_seed(seed: int):\n \"\"\"\n Sets seeds across package dependencies for reproducibility.\n \"\"\"\n seed += int(os.environ.get(\"RANK\", 0))\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\n\n# Training utils\n\n\ndef get_distributed_config(accelerator: Accelerator):\n \"\"\"\n Return accelerator distributed config\n \"\"\"\n\n dist_config = {\n \"mixed_precision\": accelerator.mixed_precision,\n \"num_gpus\": accelerator.num_processes,\n }\n\n if accelerator.state.deepspeed_plugin is not None:\n ds_plugin = accelerator.state.deepspeed_plugin\n dist_config.update(\n {\n \"gradient_accumulation_steps\": ds_plugin.gradient_accumulation_steps,\n \"gradient_clipping\": ds_plugin.gradient_clipping,\n \"zero_stage\": ds_plugin.zero_stage,\n 
\"offload_optimizer_device\": ds_plugin.offload_optimizer_device,\n \"offload_param_device\": ds_plugin.offload_param_device,\n }\n )\n\n return dist_config\n\n\nclass OptimizerName(str, Enum):\n \"\"\"Supported optimizer names\"\"\"\n\n ADAM: str = \"adam\"\n ADAMW: str = \"adamw\"\n ADAM_8BIT_BNB: str = \"adam_8bit_bnb\"\n ADAMW_8BIT_BNB: str = \"adamw_8bit_bnb\"\n SGD: str = \"sgd\"\n\n\ndef get_optimizer_class(name: OptimizerName):\n \"\"\"\n Returns the optimizer class with the given name\n\n Args:\n name (str): Name of the optimizer as found in `OptimizerNames`\n \"\"\"\n if name == OptimizerName.ADAM:\n return torch.optim.Adam\n if name == OptimizerName.ADAMW:\n return torch.optim.AdamW\n if name == OptimizerName.ADAM_8BIT_BNB.value:\n try:\n from bitsandbytes.optim import Adam8bit\n\n return Adam8bit\n except ImportError:\n raise ImportError(\n \"You must install the `bitsandbytes` package to use the 8-bit Adam. \"\n \"Install with: `pip install bitsandbytes`\"\n )\n if name == OptimizerName.ADAMW_8BIT_BNB.value:\n try:\n from bitsandbytes.optim import AdamW8bit\n\n return AdamW8bit\n except ImportError:\n raise ImportError(\n \"You must install the `bitsandbytes` package to use 8-bit AdamW. \"\n \"Install with: `pip install bitsandbytes`\"\n )\n if name == OptimizerName.SGD.value:\n return torch.optim.SGD\n supported_optimizers = [o.value for o in OptimizerName]\n raise ValueError(f\"`{name}` is not a supported optimizer. \" f\"Supported optimizers are: {supported_optimizers}\")\n\n\nclass SchedulerName(str, Enum):\n \"\"\"Supported scheduler names\"\"\"\n\n COSINE_ANNEALING = \"cosine_annealing\"\n LINEAR = \"linear\"\n\n\ndef get_scheduler_class(name: SchedulerName):\n \"\"\"\n Returns the scheduler class with the given name\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/12", "ground_truth": " if name == SchedulerName.COSINE_ANNEALING:\n return CosineAnnealingLR\n if name == SchedulerName.LINEAR:\n return LinearLR\n supported_schedulers = [s.value for s in SchedulerName]\n raise ValueError(f\"`{name}` is not a supported scheduler. \" f\"Supported schedulers are: {supported_schedulers}\")\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "__init__.py"], "context_start_lineno": 0, "lineno": 134, "function_name": "get_scheduler_class"}, "groundtruth": " if name == SchedulerName.COSINE_ANNEALING:\n return CosineAnnealingLR\n if name == SchedulerName.LINEAR:\n return LinearLR\n supported_schedulers = [s.value for s in SchedulerName]\n raise ValueError(f\"`{name}` is not a supported scheduler. 
\" f\"Supported schedulers are: {supported_schedulers}\")\n"} +{"prompt": "import functools\nfrom typing import Any, Dict, List, MutableMapping, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport transformers\n\ntry:\n from opendelta import (\n AdapterModel,\n BitFitModel,\n LoraModel,\n PrefixModel,\n SoftPromptModel,\n )\n\n HAS_OPENDELTA = True\nexcept ModuleNotFoundError:\n HAS_OPENDELTA = False\n\n\ndef make_head(n_embd: int, out: int, dtype: type = torch.float32) -> nn.Sequential:\n \"\"\"Returns a generic sequential MLP head.\"\"\"", "metadata": {"task_id": "CarperAI--trlx/13", "ground_truth": " return nn.Sequential(\n nn.Linear(n_embd, n_embd * 2, dtype=dtype),\n nn.ReLU(),\n nn.Linear(n_embd * 2, out, dtype=dtype),\n )\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "modeling.py"], "context_start_lineno": 0, "lineno": 26, "function_name": "make_head"}, "groundtruth": " return nn.Sequential(\n nn.Linear(n_embd, n_embd * 2, dtype=dtype),\n nn.ReLU(),\n nn.Linear(n_embd * 2, out, dtype=dtype),\n )\n"} +{"prompt": "import functools\nfrom typing import Any, Dict, List, MutableMapping, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport transformers\n\ntry:\n from opendelta import (\n AdapterModel,\n BitFitModel,\n LoraModel,\n PrefixModel,\n SoftPromptModel,\n )\n\n HAS_OPENDELTA = True\nexcept ModuleNotFoundError:\n HAS_OPENDELTA = False\n\n\ndef make_head(n_embd: int, out: int, dtype: type = torch.float32) -> nn.Sequential:\n \"\"\"Returns a generic sequential MLP head.\"\"\"\n return nn.Sequential(\n nn.Linear(n_embd, n_embd * 2, dtype=dtype),\n nn.ReLU(),\n nn.Linear(n_embd * 2, out, dtype=dtype),\n )\n\n\ndef freeze_bottom_causal_layers(model: nn.Module, num_layers_unfrozen: int = 0):\n \"\"\"Freezes the bottom transformer block layers of the specified model.\"\"\"\n hidden_layers = hf_get_decoder_blocks(model)\n if num_layers_unfrozen == 0:\n hidden_layers_to_freeze = list(hidden_layers)\n elif num_layers_unfrozen > 0:\n hidden_layers_to_freeze = list(hidden_layers)[:-num_layers_unfrozen]\n else:\n hidden_layers_to_freeze = []\n for layer in hidden_layers_to_freeze:\n layer.requires_grad_(False)\n\n\ndef freeze_bottom_seq2seq_layers(model: nn.Module, num_layers_unfrozen: int = 0):\n \"\"\"Freezes the bottom transformer block layers of the specified model.\"\"\"\n if num_layers_unfrozen == -1:\n return\n shared_embed = model.shared\n decoder_embed = model.decoder.embed_tokens\n encoder_blocks = model.encoder.block\n encoder_norm_layer = model.encoder.final_layer_norm\n decoder_norm_layer = model.decoder.final_layer_norm\n decoder_blocks = model.decoder.block[:-num_layers_unfrozen]\n blocks_to_freeze = (\n list(encoder_blocks)\n + list(decoder_blocks)\n + [shared_embed]\n + [encoder_norm_layer]\n + [decoder_norm_layer]\n + [decoder_embed]\n )\n for block in blocks_to_freeze:\n block.requires_grad_(False)\n\n\ndef rhasattr(obj, attr):\n \"\"\"A chain-able attribute version of hasattr. 
For example, to check if\n `obj` has the attribute `foo.bar.baz`, you can use:\n `rhasattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/67303315\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/14", "ground_truth": " _nested_attrs = attr.split(\".\")\n _curr_obj = obj\n for _a in _nested_attrs[:-1]:\n if hasattr(_curr_obj, _a):\n _curr_obj = getattr(_curr_obj, _a)\n else:\n return False\n return hasattr(_curr_obj, _nested_attrs[-1])\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "modeling.py"], "context_start_lineno": 0, "lineno": 74, "function_name": "rhasattr"}, "groundtruth": " _nested_attrs = attr.split(\".\")\n _curr_obj = obj\n for _a in _nested_attrs[:-1]:\n if hasattr(_curr_obj, _a):\n _curr_obj = getattr(_curr_obj, _a)\n else:\n return False\n return hasattr(_curr_obj, _nested_attrs[-1])\n"} +{"prompt": "import functools\nfrom typing import Any, Dict, List, MutableMapping, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport transformers\n\ntry:\n from opendelta import (\n AdapterModel,\n BitFitModel,\n LoraModel,\n PrefixModel,\n SoftPromptModel,\n )\n\n HAS_OPENDELTA = True\nexcept ModuleNotFoundError:\n HAS_OPENDELTA = False\n\n\ndef make_head(n_embd: int, out: int, dtype: type = torch.float32) -> nn.Sequential:\n \"\"\"Returns a generic sequential MLP head.\"\"\"\n return nn.Sequential(\n nn.Linear(n_embd, n_embd * 2, dtype=dtype),\n nn.ReLU(),\n nn.Linear(n_embd * 2, out, dtype=dtype),\n )\n\n\ndef freeze_bottom_causal_layers(model: nn.Module, num_layers_unfrozen: int = 0):\n \"\"\"Freezes the bottom transformer block layers of the specified model.\"\"\"\n hidden_layers = hf_get_decoder_blocks(model)\n if num_layers_unfrozen == 0:\n hidden_layers_to_freeze = list(hidden_layers)\n elif num_layers_unfrozen > 0:\n hidden_layers_to_freeze = list(hidden_layers)[:-num_layers_unfrozen]\n else:\n hidden_layers_to_freeze = []\n for layer in hidden_layers_to_freeze:\n layer.requires_grad_(False)\n\n\ndef freeze_bottom_seq2seq_layers(model: nn.Module, num_layers_unfrozen: int = 0):\n \"\"\"Freezes the bottom transformer block layers of the specified model.\"\"\"\n if num_layers_unfrozen == -1:\n return\n shared_embed = model.shared\n decoder_embed = model.decoder.embed_tokens\n encoder_blocks = model.encoder.block\n encoder_norm_layer = model.encoder.final_layer_norm\n decoder_norm_layer = model.decoder.final_layer_norm\n decoder_blocks = model.decoder.block[:-num_layers_unfrozen]\n blocks_to_freeze = (\n list(encoder_blocks)\n + list(decoder_blocks)\n + [shared_embed]\n + [encoder_norm_layer]\n + [decoder_norm_layer]\n + [decoder_embed]\n )\n for block in blocks_to_freeze:\n block.requires_grad_(False)\n\n\ndef rhasattr(obj, attr):\n \"\"\"A chain-able attribute version of hasattr. For example, to check if\n `obj` has the attribute `foo.bar.baz`, you can use:\n `rhasattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/67303315\n \"\"\"\n _nested_attrs = attr.split(\".\")\n _curr_obj = obj\n for _a in _nested_attrs[:-1]:\n if hasattr(_curr_obj, _a):\n _curr_obj = getattr(_curr_obj, _a)\n else:\n return False\n return hasattr(_curr_obj, _nested_attrs[-1])\n\n\ndef rgetattr(obj, attr: str, *args) -> object:\n \"\"\"A chain-able attribute version of getattr. 
For example, to get the\n attribute `foo.bar.baz` from `obj`, you can use:\n `rgetattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/31174427\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/15", "ground_truth": " def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split(\".\"))\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "modeling.py"], "context_start_lineno": 0, "lineno": 91, "function_name": "rgetattr"}, "groundtruth": " def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split(\".\"))\n"} +{"prompt": "import functools\nfrom typing import Any, Dict, List, MutableMapping, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport transformers\n\ntry:\n from opendelta import (\n AdapterModel,\n BitFitModel,\n LoraModel,\n PrefixModel,\n SoftPromptModel,\n )\n\n HAS_OPENDELTA = True\nexcept ModuleNotFoundError:\n HAS_OPENDELTA = False\n\n\ndef make_head(n_embd: int, out: int, dtype: type = torch.float32) -> nn.Sequential:\n \"\"\"Returns a generic sequential MLP head.\"\"\"\n return nn.Sequential(\n nn.Linear(n_embd, n_embd * 2, dtype=dtype),\n nn.ReLU(),\n nn.Linear(n_embd * 2, out, dtype=dtype),\n )\n\n\ndef freeze_bottom_causal_layers(model: nn.Module, num_layers_unfrozen: int = 0):\n \"\"\"Freezes the bottom transformer block layers of the specified model.\"\"\"\n hidden_layers = hf_get_decoder_blocks(model)\n if num_layers_unfrozen == 0:\n hidden_layers_to_freeze = list(hidden_layers)\n elif num_layers_unfrozen > 0:\n hidden_layers_to_freeze = list(hidden_layers)[:-num_layers_unfrozen]\n else:\n hidden_layers_to_freeze = []\n for layer in hidden_layers_to_freeze:\n layer.requires_grad_(False)\n\n\ndef freeze_bottom_seq2seq_layers(model: nn.Module, num_layers_unfrozen: int = 0):\n \"\"\"Freezes the bottom transformer block layers of the specified model.\"\"\"\n if num_layers_unfrozen == -1:\n return\n shared_embed = model.shared\n decoder_embed = model.decoder.embed_tokens\n encoder_blocks = model.encoder.block\n encoder_norm_layer = model.encoder.final_layer_norm\n decoder_norm_layer = model.decoder.final_layer_norm\n decoder_blocks = model.decoder.block[:-num_layers_unfrozen]\n blocks_to_freeze = (\n list(encoder_blocks)\n + list(decoder_blocks)\n + [shared_embed]\n + [encoder_norm_layer]\n + [decoder_norm_layer]\n + [decoder_embed]\n )\n for block in blocks_to_freeze:\n block.requires_grad_(False)\n\n\ndef rhasattr(obj, attr):\n \"\"\"A chain-able attribute version of hasattr. For example, to check if\n `obj` has the attribute `foo.bar.baz`, you can use:\n `rhasattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/67303315\n \"\"\"\n _nested_attrs = attr.split(\".\")\n _curr_obj = obj\n for _a in _nested_attrs[:-1]:\n if hasattr(_curr_obj, _a):\n _curr_obj = getattr(_curr_obj, _a)\n else:\n return False\n return hasattr(_curr_obj, _nested_attrs[-1])\n\n\ndef rgetattr(obj, attr: str, *args) -> object:\n \"\"\"A chain-able attribute version of getattr. 
For example, to get the\n attribute `foo.bar.baz` from `obj`, you can use:\n `rgetattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/31174427\n \"\"\"\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split(\".\"))\n\n\ndef findattr(obj, attrs: Tuple[str]) -> Union[object, None]:", "metadata": {"task_id": "CarperAI--trlx/16", "ground_truth": " for attr in attrs:\n if rhasattr(obj, attr):\n return rgetattr(obj, attr)\n raise ValueError(f\"Could not find an attribute from `{attrs}` in `{obj}`\")\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "modeling.py"], "context_start_lineno": 0, "lineno": 98, "function_name": "findattr"}, "groundtruth": " for attr in attrs:\n if rhasattr(obj, attr):\n return rgetattr(obj, attr)\n raise ValueError(f\"Could not find an attribute from `{attrs}` in `{obj}`\")\n"} +{"prompt": "import functools\nfrom typing import Any, Dict, List, MutableMapping, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport transformers\n\ntry:\n from opendelta import (\n AdapterModel,\n BitFitModel,\n LoraModel,\n PrefixModel,\n SoftPromptModel,\n )\n\n HAS_OPENDELTA = True\nexcept ModuleNotFoundError:\n HAS_OPENDELTA = False\n\n\ndef make_head(n_embd: int, out: int, dtype: type = torch.float32) -> nn.Sequential:\n \"\"\"Returns a generic sequential MLP head.\"\"\"\n return nn.Sequential(\n nn.Linear(n_embd, n_embd * 2, dtype=dtype),\n nn.ReLU(),\n nn.Linear(n_embd * 2, out, dtype=dtype),\n )\n\n\ndef freeze_bottom_causal_layers(model: nn.Module, num_layers_unfrozen: int = 0):\n \"\"\"Freezes the bottom transformer block layers of the specified model.\"\"\"\n hidden_layers = hf_get_decoder_blocks(model)\n if num_layers_unfrozen == 0:\n hidden_layers_to_freeze = list(hidden_layers)\n elif num_layers_unfrozen > 0:\n hidden_layers_to_freeze = list(hidden_layers)[:-num_layers_unfrozen]\n else:\n hidden_layers_to_freeze = []\n for layer in hidden_layers_to_freeze:\n layer.requires_grad_(False)\n\n\ndef freeze_bottom_seq2seq_layers(model: nn.Module, num_layers_unfrozen: int = 0):\n \"\"\"Freezes the bottom transformer block layers of the specified model.\"\"\"\n if num_layers_unfrozen == -1:\n return\n shared_embed = model.shared\n decoder_embed = model.decoder.embed_tokens\n encoder_blocks = model.encoder.block\n encoder_norm_layer = model.encoder.final_layer_norm\n decoder_norm_layer = model.decoder.final_layer_norm\n decoder_blocks = model.decoder.block[:-num_layers_unfrozen]\n blocks_to_freeze = (\n list(encoder_blocks)\n + list(decoder_blocks)\n + [shared_embed]\n + [encoder_norm_layer]\n + [decoder_norm_layer]\n + [decoder_embed]\n )\n for block in blocks_to_freeze:\n block.requires_grad_(False)\n\n\ndef rhasattr(obj, attr):\n \"\"\"A chain-able attribute version of hasattr. For example, to check if\n `obj` has the attribute `foo.bar.baz`, you can use:\n `rhasattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/67303315\n \"\"\"\n _nested_attrs = attr.split(\".\")\n _curr_obj = obj\n for _a in _nested_attrs[:-1]:\n if hasattr(_curr_obj, _a):\n _curr_obj = getattr(_curr_obj, _a)\n else:\n return False\n return hasattr(_curr_obj, _nested_attrs[-1])\n\n\ndef rgetattr(obj, attr: str, *args) -> object:\n \"\"\"A chain-able attribute version of getattr. 
For example, to get the\n attribute `foo.bar.baz` from `obj`, you can use:\n `rgetattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/31174427\n \"\"\"\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split(\".\"))\n\n\ndef findattr(obj, attrs: Tuple[str]) -> Union[object, None]:\n for attr in attrs:\n if rhasattr(obj, attr):\n return rgetattr(obj, attr)\n raise ValueError(f\"Could not find an attribute from `{attrs}` in `{obj}`\")\n\n\ndef hf_get_decoder(model: nn.Module) -> nn.Module:\n \"\"\"Returns the causal decoder backbone of the specified HuggingFace transformers\n model.\n NOTE: Different model configurations have different causal decoder attribute\n names.\n - transformer: (GPT2LMHeadModel, GPTJConfig)\n - model.decoder: (OPTConfig, BloomConfig)\n - gpt_neox: (GPTNeoXConfig)\n \"\"\"\n decoder_attrs = (\"transformer\", \"model.decoder\", \"gpt_neox\", \"decoder\")\n return findattr(model, decoder_attrs)\n\n\ndef hf_get_decoder_final_norm(model: nn.Module) -> float:\n \"\"\"Returns the final (layer) norm of the specified decoder.\n NOTE: Different model configurations have different final norm attribute names.\n - transformer.ln_f: (GPT2LMHeadModel, GPTJForCausalLM)\n - model.decoder.final_layer_norm: (OPTForCausalLM)\n - gpt_neox.layers.final_layer_norm: (GPTNeoXForCausalLM)\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/17", "ground_truth": " norm_attrs = (\n \"transformer.ln_f\",\n \"model.decoder.final_layer_norm\",\n \"decoder.final_layer_norm\",\n \"gpt_neox.final_layer_norm\",\n )\n return findattr(model, norm_attrs)\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "modeling.py"], "context_start_lineno": 0, "lineno": 124, "function_name": "hf_get_decoder_final_norm"}, "groundtruth": " norm_attrs = (\n \"transformer.ln_f\",\n \"model.decoder.final_layer_norm\",\n \"decoder.final_layer_norm\",\n \"gpt_neox.final_layer_norm\",\n )\n return findattr(model, norm_attrs)\n"} +{"prompt": "import functools\nfrom typing import Any, Dict, List, MutableMapping, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport transformers\n\ntry:\n from opendelta import (\n AdapterModel,\n BitFitModel,\n LoraModel,\n PrefixModel,\n SoftPromptModel,\n )\n\n HAS_OPENDELTA = True\nexcept ModuleNotFoundError:\n HAS_OPENDELTA = False\n\n\ndef make_head(n_embd: int, out: int, dtype: type = torch.float32) -> nn.Sequential:\n \"\"\"Returns a generic sequential MLP head.\"\"\"\n return nn.Sequential(\n nn.Linear(n_embd, n_embd * 2, dtype=dtype),\n nn.ReLU(),\n nn.Linear(n_embd * 2, out, dtype=dtype),\n )\n\n\ndef freeze_bottom_causal_layers(model: nn.Module, num_layers_unfrozen: int = 0):\n \"\"\"Freezes the bottom transformer block layers of the specified model.\"\"\"\n hidden_layers = hf_get_decoder_blocks(model)\n if num_layers_unfrozen == 0:\n hidden_layers_to_freeze = list(hidden_layers)\n elif num_layers_unfrozen > 0:\n hidden_layers_to_freeze = list(hidden_layers)[:-num_layers_unfrozen]\n else:\n hidden_layers_to_freeze = []\n for layer in hidden_layers_to_freeze:\n layer.requires_grad_(False)\n\n\ndef freeze_bottom_seq2seq_layers(model: nn.Module, num_layers_unfrozen: int = 0):\n \"\"\"Freezes the bottom transformer block layers of the specified model.\"\"\"\n if num_layers_unfrozen == -1:\n return\n shared_embed = model.shared\n decoder_embed = model.decoder.embed_tokens\n encoder_blocks = model.encoder.block\n 
encoder_norm_layer = model.encoder.final_layer_norm\n decoder_norm_layer = model.decoder.final_layer_norm\n decoder_blocks = model.decoder.block[:-num_layers_unfrozen]\n blocks_to_freeze = (\n list(encoder_blocks)\n + list(decoder_blocks)\n + [shared_embed]\n + [encoder_norm_layer]\n + [decoder_norm_layer]\n + [decoder_embed]\n )\n for block in blocks_to_freeze:\n block.requires_grad_(False)\n\n\ndef rhasattr(obj, attr):\n \"\"\"A chain-able attribute version of hasattr. For example, to check if\n `obj` has the attribute `foo.bar.baz`, you can use:\n `rhasattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/67303315\n \"\"\"\n _nested_attrs = attr.split(\".\")\n _curr_obj = obj\n for _a in _nested_attrs[:-1]:\n if hasattr(_curr_obj, _a):\n _curr_obj = getattr(_curr_obj, _a)\n else:\n return False\n return hasattr(_curr_obj, _nested_attrs[-1])\n\n\ndef rgetattr(obj, attr: str, *args) -> object:\n \"\"\"A chain-able attribute version of getattr. For example, to get the\n attribute `foo.bar.baz` from `obj`, you can use:\n `rgetattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/31174427\n \"\"\"\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split(\".\"))\n\n\ndef findattr(obj, attrs: Tuple[str]) -> Union[object, None]:\n for attr in attrs:\n if rhasattr(obj, attr):\n return rgetattr(obj, attr)\n raise ValueError(f\"Could not find an attribute from `{attrs}` in `{obj}`\")\n\n\ndef hf_get_decoder(model: nn.Module) -> nn.Module:\n \"\"\"Returns the causal decoder backbone of the specified HuggingFace transformers\n model.\n NOTE: Different model configurations have different causal decoder attribute\n names.\n - transformer: (GPT2LMHeadModel, GPTJConfig)\n - model.decoder: (OPTConfig, BloomConfig)\n - gpt_neox: (GPTNeoXConfig)\n \"\"\"\n decoder_attrs = (\"transformer\", \"model.decoder\", \"gpt_neox\", \"decoder\")\n return findattr(model, decoder_attrs)\n\n\ndef hf_get_decoder_final_norm(model: nn.Module) -> float:\n \"\"\"Returns the final (layer) norm of the specified decoder.\n NOTE: Different model configurations have different final norm attribute names.\n - transformer.ln_f: (GPT2LMHeadModel, GPTJForCausalLM)\n - model.decoder.final_layer_norm: (OPTForCausalLM)\n - gpt_neox.layers.final_layer_norm: (GPTNeoXForCausalLM)\n \"\"\"\n norm_attrs = (\n \"transformer.ln_f\",\n \"model.decoder.final_layer_norm\",\n \"decoder.final_layer_norm\",\n \"gpt_neox.final_layer_norm\",\n )\n return findattr(model, norm_attrs)\n\n\ndef hf_get_decoder_blocks(model: nn.Module) -> Tuple[nn.Module]:\n \"\"\"Returns the decoder hidden layers of the specified model.\n NOTE: Different model configurations have different hidden layer attribute names.\n - transformer.h: (BloomForCausalLM, GPT2LMHeadModel, GPTJForCausalLM)\n - model.decoder.layers: (OPTForCausalLM)\n - gpt_neox.layers: (GPTNeoXForCausalLM)\n - decoder.block: (T5ForConditionalGeneration)\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/18", "ground_truth": " hidden_layers_attrs = (\n \"h\",\n \"layers\",\n \"decoder.layers\",\n \"transformer.h\",\n \"model.decoder.layers\",\n \"gpt_neox.layers\",\n \"decoder.block\",\n )\n return findattr(model, hidden_layers_attrs)\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "modeling.py"], "context_start_lineno": 0, "lineno": 141, "function_name": "hf_get_decoder_blocks"}, "groundtruth": " hidden_layers_attrs = (\n \"h\",\n \"layers\",\n \"decoder.layers\",\n \"transformer.h\",\n 
\"model.decoder.layers\",\n \"gpt_neox.layers\",\n \"decoder.block\",\n )\n return findattr(model, hidden_layers_attrs)\n"} +{"prompt": "import functools\nfrom typing import Any, Dict, List, MutableMapping, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport transformers\n\ntry:\n from opendelta import (\n AdapterModel,\n BitFitModel,\n LoraModel,\n PrefixModel,\n SoftPromptModel,\n )\n\n HAS_OPENDELTA = True\nexcept ModuleNotFoundError:\n HAS_OPENDELTA = False\n\n\ndef make_head(n_embd: int, out: int, dtype: type = torch.float32) -> nn.Sequential:\n \"\"\"Returns a generic sequential MLP head.\"\"\"\n return nn.Sequential(\n nn.Linear(n_embd, n_embd * 2, dtype=dtype),\n nn.ReLU(),\n nn.Linear(n_embd * 2, out, dtype=dtype),\n )\n\n\ndef freeze_bottom_causal_layers(model: nn.Module, num_layers_unfrozen: int = 0):\n \"\"\"Freezes the bottom transformer block layers of the specified model.\"\"\"\n hidden_layers = hf_get_decoder_blocks(model)\n if num_layers_unfrozen == 0:\n hidden_layers_to_freeze = list(hidden_layers)\n elif num_layers_unfrozen > 0:\n hidden_layers_to_freeze = list(hidden_layers)[:-num_layers_unfrozen]\n else:\n hidden_layers_to_freeze = []\n for layer in hidden_layers_to_freeze:\n layer.requires_grad_(False)\n\n\ndef freeze_bottom_seq2seq_layers(model: nn.Module, num_layers_unfrozen: int = 0):\n \"\"\"Freezes the bottom transformer block layers of the specified model.\"\"\"\n if num_layers_unfrozen == -1:\n return\n shared_embed = model.shared\n decoder_embed = model.decoder.embed_tokens\n encoder_blocks = model.encoder.block\n encoder_norm_layer = model.encoder.final_layer_norm\n decoder_norm_layer = model.decoder.final_layer_norm\n decoder_blocks = model.decoder.block[:-num_layers_unfrozen]\n blocks_to_freeze = (\n list(encoder_blocks)\n + list(decoder_blocks)\n + [shared_embed]\n + [encoder_norm_layer]\n + [decoder_norm_layer]\n + [decoder_embed]\n )\n for block in blocks_to_freeze:\n block.requires_grad_(False)\n\n\ndef rhasattr(obj, attr):\n \"\"\"A chain-able attribute version of hasattr. For example, to check if\n `obj` has the attribute `foo.bar.baz`, you can use:\n `rhasattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/67303315\n \"\"\"\n _nested_attrs = attr.split(\".\")\n _curr_obj = obj\n for _a in _nested_attrs[:-1]:\n if hasattr(_curr_obj, _a):\n _curr_obj = getattr(_curr_obj, _a)\n else:\n return False\n return hasattr(_curr_obj, _nested_attrs[-1])\n\n\ndef rgetattr(obj, attr: str, *args) -> object:\n \"\"\"A chain-able attribute version of getattr. 
For example, to get the\n attribute `foo.bar.baz` from `obj`, you can use:\n `rgetattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/31174427\n \"\"\"\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split(\".\"))\n\n\ndef findattr(obj, attrs: Tuple[str]) -> Union[object, None]:\n for attr in attrs:\n if rhasattr(obj, attr):\n return rgetattr(obj, attr)\n raise ValueError(f\"Could not find an attribute from `{attrs}` in `{obj}`\")\n\n\ndef hf_get_decoder(model: nn.Module) -> nn.Module:\n \"\"\"Returns the causal decoder backbone of the specified HuggingFace transformers\n model.\n NOTE: Different model configurations have different causal decoder attribute\n names.\n - transformer: (GPT2LMHeadModel, GPTJConfig)\n - model.decoder: (OPTConfig, BloomConfig)\n - gpt_neox: (GPTNeoXConfig)\n \"\"\"\n decoder_attrs = (\"transformer\", \"model.decoder\", \"gpt_neox\", \"decoder\")\n return findattr(model, decoder_attrs)\n\n\ndef hf_get_decoder_final_norm(model: nn.Module) -> float:\n \"\"\"Returns the final (layer) norm of the specified decoder.\n NOTE: Different model configurations have different final norm attribute names.\n - transformer.ln_f: (GPT2LMHeadModel, GPTJForCausalLM)\n - model.decoder.final_layer_norm: (OPTForCausalLM)\n - gpt_neox.layers.final_layer_norm: (GPTNeoXForCausalLM)\n \"\"\"\n norm_attrs = (\n \"transformer.ln_f\",\n \"model.decoder.final_layer_norm\",\n \"decoder.final_layer_norm\",\n \"gpt_neox.final_layer_norm\",\n )\n return findattr(model, norm_attrs)\n\n\ndef hf_get_decoder_blocks(model: nn.Module) -> Tuple[nn.Module]:\n \"\"\"Returns the decoder hidden layers of the specified model.\n NOTE: Different model configurations have different hidden layer attribute names.\n - transformer.h: (BloomForCausalLM, GPT2LMHeadModel, GPTJForCausalLM)\n - model.decoder.layers: (OPTForCausalLM)\n - gpt_neox.layers: (GPTNeoXForCausalLM)\n - decoder.block: (T5ForConditionalGeneration)\n \"\"\"\n hidden_layers_attrs = (\n \"h\",\n \"layers\",\n \"decoder.layers\",\n \"transformer.h\",\n \"model.decoder.layers\",\n \"gpt_neox.layers\",\n \"decoder.block\",\n )\n return findattr(model, hidden_layers_attrs)\n\n\ndef hf_get_lm_head(model: nn.Module) -> nn.Module:\n \"\"\"Returns the language modeling (lm) head of the specified HuggingFace\n transformers model.\n NOTE: Different model configurations have different `lm_head` attribute names.\n - lm_head: (GPT2LMHeadModel, BloomForCausalLM)\n - embed_out: (GPTNeoXForCausalLM)\n \"\"\"\n return model.get_output_embeddings()\n\n\ndef hf_get_hidden_size(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the hidden layer dimensionality of the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different hidden size attribute names.\n - hidden_size: (OPTConfig, BloomConfig)\n - n_embd: (GPT2Config, GPTJConfig)\n - d_model: (PegasusConfig, XLNetConfig)\n \"\"\"\n hidden_size_attrs = (\"hidden_size\", \"n_embd\", \"d_model\")\n return findattr(config, hidden_size_attrs)\n\n\ndef hf_get_num_hidden_layers(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the number of hidden layers in the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different number-of-layers attribute\n names.\n - num_hidden_layers: (GPTNeoXConfig, OPTConfig)\n - n_layer: (GPT2Config, GPTJConfig, BloomConfig)\n \"\"\"\n num_hidden_layers_attrs = 
(\"num_hidden_layers\", \"n_layer\")\n return findattr(config, num_hidden_layers_attrs)\n\n\ndef get_global_statistics(xs: torch.Tensor) -> Tuple[float, float, int]:\n \"\"\"\n Computes element-wise mean and variance of the tensor across processes\n \"\"\"\n sum_and_count = torch.tensor([xs.sum(), xs.numel()], device=xs.device)\n dist.all_reduce(sum_and_count, dist.ReduceOp.SUM)\n global_sum, count = sum_and_count\n global_mean = global_sum / count\n\n sum_var = torch.sum((xs - global_mean) ** 2)\n dist.all_reduce(sum_var, dist.ReduceOp.SUM)\n global_var = sum_var / count\n return global_mean, global_var, count\n\n\ndef whiten(xs: torch.Tensor, shift_mean=True, distributed=True) -> torch.Tensor:\n \"\"\"Whitens values\"\"\"\n if distributed and dist.is_initialized():\n mean, var, _ = get_global_statistics(xs)\n else:\n var, mean = torch.var_mean(xs)\n\n whitened = (xs - mean) * torch.rsqrt(var + 1e-8)\n if not shift_mean:\n whitened += mean\n return whitened\n\n\ndef logprobs_of_labels(logits, labels):\n \"\"\"Log probabilities of the labels\n\n These are calculated from the logits.\"\"\"\n logprobs = F.log_softmax(logits, dim=-1)\n logprobs_labels = torch.gather(logprobs, dim=-1, index=labels.unsqueeze(-1))\n return logprobs_labels.squeeze(-1)\n\n\ndef flatten_dict(\n d: Union[dict, MutableMapping],\n parent_key: str = \"\",\n sep: str = \"/\",\n) -> dict:\n # From: https://stackoverflow.com/a/6027615\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten_dict(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\ndef get_tensor_stats(xs: torch.Tensor, mask: torch.Tensor, n: int):\n mean = (xs * mask).sum() / n\n return dict(\n mean=mean,\n min=torch.where(mask.bool(), xs, np.inf).min(),\n max=torch.where(mask.bool(), xs, -np.inf).max(),\n std=torch.sqrt(((xs - mean) * mask).pow(2).sum() / n),\n )\n\n\nclass RunningMoments:\n def __init__(self):\n \"\"\"\n Calculates the running mean and standard deviation of a data stream. 
Modified version of\n https://github.com/DLR-RM/stable-baselines3/blob/a6f5049a99a4c21a6f0bcce458ca3306cef310e0/stable_baselines3/common/running_mean_std.py\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/19", "ground_truth": " self.mean = 0\n self.std = 1\n self.var = 1\n self.count = 1e-24\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "modeling.py"], "context_start_lineno": 0, "lineno": 256, "function_name": "__init__"}, "groundtruth": " self.mean = 0\n self.std = 1\n self.var = 1\n self.count = 1e-24\n"} +{"prompt": "import functools\nfrom typing import Any, Dict, List, MutableMapping, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport transformers\n\ntry:\n from opendelta import (\n AdapterModel,\n BitFitModel,\n LoraModel,\n PrefixModel,\n SoftPromptModel,\n )\n\n HAS_OPENDELTA = True\nexcept ModuleNotFoundError:\n HAS_OPENDELTA = False\n\n\ndef make_head(n_embd: int, out: int, dtype: type = torch.float32) -> nn.Sequential:\n \"\"\"Returns a generic sequential MLP head.\"\"\"\n return nn.Sequential(\n nn.Linear(n_embd, n_embd * 2, dtype=dtype),\n nn.ReLU(),\n nn.Linear(n_embd * 2, out, dtype=dtype),\n )\n\n\ndef freeze_bottom_causal_layers(model: nn.Module, num_layers_unfrozen: int = 0):\n \"\"\"Freezes the bottom transformer block layers of the specified model.\"\"\"\n hidden_layers = hf_get_decoder_blocks(model)\n if num_layers_unfrozen == 0:\n hidden_layers_to_freeze = list(hidden_layers)\n elif num_layers_unfrozen > 0:\n hidden_layers_to_freeze = list(hidden_layers)[:-num_layers_unfrozen]\n else:\n hidden_layers_to_freeze = []\n for layer in hidden_layers_to_freeze:\n layer.requires_grad_(False)\n\n\ndef freeze_bottom_seq2seq_layers(model: nn.Module, num_layers_unfrozen: int = 0):\n \"\"\"Freezes the bottom transformer block layers of the specified model.\"\"\"\n if num_layers_unfrozen == -1:\n return\n shared_embed = model.shared\n decoder_embed = model.decoder.embed_tokens\n encoder_blocks = model.encoder.block\n encoder_norm_layer = model.encoder.final_layer_norm\n decoder_norm_layer = model.decoder.final_layer_norm\n decoder_blocks = model.decoder.block[:-num_layers_unfrozen]\n blocks_to_freeze = (\n list(encoder_blocks)\n + list(decoder_blocks)\n + [shared_embed]\n + [encoder_norm_layer]\n + [decoder_norm_layer]\n + [decoder_embed]\n )\n for block in blocks_to_freeze:\n block.requires_grad_(False)\n\n\ndef rhasattr(obj, attr):\n \"\"\"A chain-able attribute version of hasattr. For example, to check if\n `obj` has the attribute `foo.bar.baz`, you can use:\n `rhasattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/67303315\n \"\"\"\n _nested_attrs = attr.split(\".\")\n _curr_obj = obj\n for _a in _nested_attrs[:-1]:\n if hasattr(_curr_obj, _a):\n _curr_obj = getattr(_curr_obj, _a)\n else:\n return False\n return hasattr(_curr_obj, _nested_attrs[-1])\n\n\ndef rgetattr(obj, attr: str, *args) -> object:\n \"\"\"A chain-able attribute version of getattr. 
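`make_head` near the top of this prompt is small enough to sanity-check directly; a sketch (the freeze helpers need a real backbone, so they are only described in the comment):

import torch
from trlx.utils.modeling import make_head

head = make_head(n_embd=768, out=1)               # Linear(768 -> 1536) -> ReLU -> Linear(1536 -> 1)
assert head(torch.randn(4, 768)).shape == (4, 1)
# freeze_bottom_causal_layers(model, num_layers_unfrozen=2) would call
# requires_grad_(False) on all but the last two blocks from hf_get_decoder_blocks(model).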
For example, to get the\n attribute `foo.bar.baz` from `obj`, you can use:\n `rgetattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/31174427\n \"\"\"\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split(\".\"))\n\n\ndef findattr(obj, attrs: Tuple[str]) -> Union[object, None]:\n for attr in attrs:\n if rhasattr(obj, attr):\n return rgetattr(obj, attr)\n raise ValueError(f\"Could not find an attribute from `{attrs}` in `{obj}`\")\n\n\ndef hf_get_decoder(model: nn.Module) -> nn.Module:\n \"\"\"Returns the causal decoder backbone of the specified HuggingFace transformers\n model.\n NOTE: Different model configurations have different causal decoder attribute\n names.\n - transformer: (GPT2LMHeadModel, GPTJConfig)\n - model.decoder: (OPTConfig, BloomConfig)\n - gpt_neox: (GPTNeoXConfig)\n \"\"\"\n decoder_attrs = (\"transformer\", \"model.decoder\", \"gpt_neox\", \"decoder\")\n return findattr(model, decoder_attrs)\n\n\ndef hf_get_decoder_final_norm(model: nn.Module) -> float:\n \"\"\"Returns the final (layer) norm of the specified decoder.\n NOTE: Different model configurations have different final norm attribute names.\n - transformer.ln_f: (GPT2LMHeadModel, GPTJForCausalLM)\n - model.decoder.final_layer_norm: (OPTForCausalLM)\n - gpt_neox.layers.final_layer_norm: (GPTNeoXForCausalLM)\n \"\"\"\n norm_attrs = (\n \"transformer.ln_f\",\n \"model.decoder.final_layer_norm\",\n \"decoder.final_layer_norm\",\n \"gpt_neox.final_layer_norm\",\n )\n return findattr(model, norm_attrs)\n\n\ndef hf_get_decoder_blocks(model: nn.Module) -> Tuple[nn.Module]:\n \"\"\"Returns the decoder hidden layers of the specified model.\n NOTE: Different model configurations have different hidden layer attribute names.\n - transformer.h: (BloomForCausalLM, GPT2LMHeadModel, GPTJForCausalLM)\n - model.decoder.layers: (OPTForCausalLM)\n - gpt_neox.layers: (GPTNeoXForCausalLM)\n - decoder.block: (T5ForConditionalGeneration)\n \"\"\"\n hidden_layers_attrs = (\n \"h\",\n \"layers\",\n \"decoder.layers\",\n \"transformer.h\",\n \"model.decoder.layers\",\n \"gpt_neox.layers\",\n \"decoder.block\",\n )\n return findattr(model, hidden_layers_attrs)\n\n\ndef hf_get_lm_head(model: nn.Module) -> nn.Module:\n \"\"\"Returns the language modeling (lm) head of the specified HuggingFace\n transformers model.\n NOTE: Different model configurations have different `lm_head` attribute names.\n - lm_head: (GPT2LMHeadModel, BloomForCausalLM)\n - embed_out: (GPTNeoXForCausalLM)\n \"\"\"\n return model.get_output_embeddings()\n\n\ndef hf_get_hidden_size(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the hidden layer dimensionality of the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different hidden size attribute names.\n - hidden_size: (OPTConfig, BloomConfig)\n - n_embd: (GPT2Config, GPTJConfig)\n - d_model: (PegasusConfig, XLNetConfig)\n \"\"\"\n hidden_size_attrs = (\"hidden_size\", \"n_embd\", \"d_model\")\n return findattr(config, hidden_size_attrs)\n\n\ndef hf_get_num_hidden_layers(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the number of hidden layers in the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different number-of-layers attribute\n names.\n - num_hidden_layers: (GPTNeoXConfig, OPTConfig)\n - n_layer: (GPT2Config, GPTJConfig, BloomConfig)\n \"\"\"\n num_hidden_layers_attrs = 
(\"num_hidden_layers\", \"n_layer\")\n return findattr(config, num_hidden_layers_attrs)\n\n\ndef get_global_statistics(xs: torch.Tensor) -> Tuple[float, float, int]:\n \"\"\"\n Computes element-wise mean and variance of the tensor across processes\n \"\"\"\n sum_and_count = torch.tensor([xs.sum(), xs.numel()], device=xs.device)\n dist.all_reduce(sum_and_count, dist.ReduceOp.SUM)\n global_sum, count = sum_and_count\n global_mean = global_sum / count\n\n sum_var = torch.sum((xs - global_mean) ** 2)\n dist.all_reduce(sum_var, dist.ReduceOp.SUM)\n global_var = sum_var / count\n return global_mean, global_var, count\n\n\ndef whiten(xs: torch.Tensor, shift_mean=True, distributed=True) -> torch.Tensor:\n \"\"\"Whitens values\"\"\"\n if distributed and dist.is_initialized():\n mean, var, _ = get_global_statistics(xs)\n else:\n var, mean = torch.var_mean(xs)\n\n whitened = (xs - mean) * torch.rsqrt(var + 1e-8)\n if not shift_mean:\n whitened += mean\n return whitened\n\n\ndef logprobs_of_labels(logits, labels):\n \"\"\"Log probabilities of the labels\n\n These are calculated from the logits.\"\"\"\n logprobs = F.log_softmax(logits, dim=-1)\n logprobs_labels = torch.gather(logprobs, dim=-1, index=labels.unsqueeze(-1))\n return logprobs_labels.squeeze(-1)\n\n\ndef flatten_dict(\n d: Union[dict, MutableMapping],\n parent_key: str = \"\",\n sep: str = \"/\",\n) -> dict:\n # From: https://stackoverflow.com/a/6027615\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten_dict(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\ndef get_tensor_stats(xs: torch.Tensor, mask: torch.Tensor, n: int):\n mean = (xs * mask).sum() / n\n return dict(\n mean=mean,\n min=torch.where(mask.bool(), xs, np.inf).min(),\n max=torch.where(mask.bool(), xs, -np.inf).max(),\n std=torch.sqrt(((xs - mean) * mask).pow(2).sum() / n),\n )\n\n\nclass RunningMoments:\n def __init__(self):\n \"\"\"\n Calculates the running mean and standard deviation of a data stream. 
Modified version of\n https://github.com/DLR-RM/stable-baselines3/blob/a6f5049a99a4c21a6f0bcce458ca3306cef310e0/stable_baselines3/common/running_mean_std.py\n \"\"\"\n self.mean = 0\n self.std = 1\n self.var = 1\n self.count = 1e-24\n\n def update(self, xs: torch.Tensor) -> Tuple[float, float]:\n \"\"\"Updates running moments from batch's moments computed across ranks\"\"\"", "metadata": {"task_id": "CarperAI--trlx/20", "ground_truth": " if dist.is_initialized():\n xs_mean, xs_var, xs_count = get_global_statistics(xs)\n else:\n xs_count = xs.numel()\n xs_var, xs_mean = torch.var_mean(xs, unbiased=False)\n\n delta = xs_mean - self.mean\n tot_count = self.count + xs_count\n\n new_sum = xs_var * xs_count\n # correct old_sum deviation accounting for the new mean\n old_sum = self.var * self.count + delta**2 * self.count * xs_count / tot_count\n tot_sum = old_sum + new_sum\n\n self.mean += delta * xs_count / tot_count\n self.var = tot_sum / tot_count\n self.std = (self.var * tot_count / (tot_count - 1)).sqrt()\n self.count = tot_count\n\n return xs_mean, (xs_var * xs_count / (xs_count - 1)).sqrt()\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "modeling.py"], "context_start_lineno": 0, "lineno": 263, "function_name": "update"}, "groundtruth": " if dist.is_initialized():\n xs_mean, xs_var, xs_count = get_global_statistics(xs)\n else:\n xs_count = xs.numel()\n xs_var, xs_mean = torch.var_mean(xs, unbiased=False)\n\n delta = xs_mean - self.mean\n tot_count = self.count + xs_count\n\n new_sum = xs_var * xs_count\n # correct old_sum deviation accounting for the new mean\n old_sum = self.var * self.count + delta**2 * self.count * xs_count / tot_count\n tot_sum = old_sum + new_sum\n\n self.mean += delta * xs_count / tot_count\n self.var = tot_sum / tot_count\n self.std = (self.var * tot_count / (tot_count - 1)).sqrt()\n self.count = tot_count\n\n return xs_mean, (xs_var * xs_count / (xs_count - 1)).sqrt()\n"} +{"prompt": "_get_decoder_blocks(model)\n if num_layers_unfrozen == 0:\n hidden_layers_to_freeze = list(hidden_layers)\n elif num_layers_unfrozen > 0:\n hidden_layers_to_freeze = list(hidden_layers)[:-num_layers_unfrozen]\n else:\n hidden_layers_to_freeze = []\n for layer in hidden_layers_to_freeze:\n layer.requires_grad_(False)\n\n\ndef freeze_bottom_seq2seq_layers(model: nn.Module, num_layers_unfrozen: int = 0):\n \"\"\"Freezes the bottom transformer block layers of the specified model.\"\"\"\n if num_layers_unfrozen == -1:\n return\n shared_embed = model.shared\n decoder_embed = model.decoder.embed_tokens\n encoder_blocks = model.encoder.block\n encoder_norm_layer = model.encoder.final_layer_norm\n decoder_norm_layer = model.decoder.final_layer_norm\n decoder_blocks = model.decoder.block[:-num_layers_unfrozen]\n blocks_to_freeze = (\n list(encoder_blocks)\n + list(decoder_blocks)\n + [shared_embed]\n + [encoder_norm_layer]\n + [decoder_norm_layer]\n + [decoder_embed]\n )\n for block in blocks_to_freeze:\n block.requires_grad_(False)\n\n\ndef rhasattr(obj, attr):\n \"\"\"A chain-able attribute version of hasattr. 
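The `update` ground truth above is the standard parallel-variance merge, so feeding batches sequentially should reproduce the statistics of the concatenated stream; a single-process sketch (the distributed branch all-reduces instead):

import torch
from trlx.utils.modeling import RunningMoments

rm = RunningMoments()
a, b = torch.randn(1000), torch.randn(500) + 2.0
rm.update(a)
rm.update(b)
full = torch.cat([a, b])
assert torch.allclose(rm.mean, full.mean(), atol=1e-4)
assert torch.allclose(rm.std, full.std(), atol=1e-4)   # unbiased, matching torch.std's default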
For example, to check if\n `obj` has the attribute `foo.bar.baz`, you can use:\n `rhasattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/67303315\n \"\"\"\n _nested_attrs = attr.split(\".\")\n _curr_obj = obj\n for _a in _nested_attrs[:-1]:\n if hasattr(_curr_obj, _a):\n _curr_obj = getattr(_curr_obj, _a)\n else:\n return False\n return hasattr(_curr_obj, _nested_attrs[-1])\n\n\ndef rgetattr(obj, attr: str, *args) -> object:\n \"\"\"A chain-able attribute version of getattr. For example, to get the\n attribute `foo.bar.baz` from `obj`, you can use:\n `rgetattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/31174427\n \"\"\"\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split(\".\"))\n\n\ndef findattr(obj, attrs: Tuple[str]) -> Union[object, None]:\n for attr in attrs:\n if rhasattr(obj, attr):\n return rgetattr(obj, attr)\n raise ValueError(f\"Could not find an attribute from `{attrs}` in `{obj}`\")\n\n\ndef hf_get_decoder(model: nn.Module) -> nn.Module:\n \"\"\"Returns the causal decoder backbone of the specified HuggingFace transformers\n model.\n NOTE: Different model configurations have different causal decoder attribute\n names.\n - transformer: (GPT2LMHeadModel, GPTJConfig)\n - model.decoder: (OPTConfig, BloomConfig)\n - gpt_neox: (GPTNeoXConfig)\n \"\"\"\n decoder_attrs = (\"transformer\", \"model.decoder\", \"gpt_neox\", \"decoder\")\n return findattr(model, decoder_attrs)\n\n\ndef hf_get_decoder_final_norm(model: nn.Module) -> float:\n \"\"\"Returns the final (layer) norm of the specified decoder.\n NOTE: Different model configurations have different final norm attribute names.\n - transformer.ln_f: (GPT2LMHeadModel, GPTJForCausalLM)\n - model.decoder.final_layer_norm: (OPTForCausalLM)\n - gpt_neox.layers.final_layer_norm: (GPTNeoXForCausalLM)\n \"\"\"\n norm_attrs = (\n \"transformer.ln_f\",\n \"model.decoder.final_layer_norm\",\n \"decoder.final_layer_norm\",\n \"gpt_neox.final_layer_norm\",\n )\n return findattr(model, norm_attrs)\n\n\ndef hf_get_decoder_blocks(model: nn.Module) -> Tuple[nn.Module]:\n \"\"\"Returns the decoder hidden layers of the specified model.\n NOTE: Different model configurations have different hidden layer attribute names.\n - transformer.h: (BloomForCausalLM, GPT2LMHeadModel, GPTJForCausalLM)\n - model.decoder.layers: (OPTForCausalLM)\n - gpt_neox.layers: (GPTNeoXForCausalLM)\n - decoder.block: (T5ForConditionalGeneration)\n \"\"\"\n hidden_layers_attrs = (\n \"h\",\n \"layers\",\n \"decoder.layers\",\n \"transformer.h\",\n \"model.decoder.layers\",\n \"gpt_neox.layers\",\n \"decoder.block\",\n )\n return findattr(model, hidden_layers_attrs)\n\n\ndef hf_get_lm_head(model: nn.Module) -> nn.Module:\n \"\"\"Returns the language modeling (lm) head of the specified HuggingFace\n transformers model.\n NOTE: Different model configurations have different `lm_head` attribute names.\n - lm_head: (GPT2LMHeadModel, BloomForCausalLM)\n - embed_out: (GPTNeoXForCausalLM)\n \"\"\"\n return model.get_output_embeddings()\n\n\ndef hf_get_hidden_size(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the hidden layer dimensionality of the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different hidden size attribute names.\n - hidden_size: (OPTConfig, BloomConfig)\n - n_embd: (GPT2Config, GPTJConfig)\n - d_model: (PegasusConfig, XLNetConfig)\n \"\"\"\n hidden_size_attrs = (\"hidden_size\", 
\"n_embd\", \"d_model\")\n return findattr(config, hidden_size_attrs)\n\n\ndef hf_get_num_hidden_layers(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the number of hidden layers in the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different number-of-layers attribute\n names.\n - num_hidden_layers: (GPTNeoXConfig, OPTConfig)\n - n_layer: (GPT2Config, GPTJConfig, BloomConfig)\n \"\"\"\n num_hidden_layers_attrs = (\"num_hidden_layers\", \"n_layer\")\n return findattr(config, num_hidden_layers_attrs)\n\n\ndef get_global_statistics(xs: torch.Tensor) -> Tuple[float, float, int]:\n \"\"\"\n Computes element-wise mean and variance of the tensor across processes\n \"\"\"\n sum_and_count = torch.tensor([xs.sum(), xs.numel()], device=xs.device)\n dist.all_reduce(sum_and_count, dist.ReduceOp.SUM)\n global_sum, count = sum_and_count\n global_mean = global_sum / count\n\n sum_var = torch.sum((xs - global_mean) ** 2)\n dist.all_reduce(sum_var, dist.ReduceOp.SUM)\n global_var = sum_var / count\n return global_mean, global_var, count\n\n\ndef whiten(xs: torch.Tensor, shift_mean=True, distributed=True) -> torch.Tensor:\n \"\"\"Whitens values\"\"\"\n if distributed and dist.is_initialized():\n mean, var, _ = get_global_statistics(xs)\n else:\n var, mean = torch.var_mean(xs)\n\n whitened = (xs - mean) * torch.rsqrt(var + 1e-8)\n if not shift_mean:\n whitened += mean\n return whitened\n\n\ndef logprobs_of_labels(logits, labels):\n \"\"\"Log probabilities of the labels\n\n These are calculated from the logits.\"\"\"\n logprobs = F.log_softmax(logits, dim=-1)\n logprobs_labels = torch.gather(logprobs, dim=-1, index=labels.unsqueeze(-1))\n return logprobs_labels.squeeze(-1)\n\n\ndef flatten_dict(\n d: Union[dict, MutableMapping],\n parent_key: str = \"\",\n sep: str = \"/\",\n) -> dict:\n # From: https://stackoverflow.com/a/6027615\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten_dict(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\ndef get_tensor_stats(xs: torch.Tensor, mask: torch.Tensor, n: int):\n mean = (xs * mask).sum() / n\n return dict(\n mean=mean,\n min=torch.where(mask.bool(), xs, np.inf).min(),\n max=torch.where(mask.bool(), xs, -np.inf).max(),\n std=torch.sqrt(((xs - mean) * mask).pow(2).sum() / n),\n )\n\n\nclass RunningMoments:\n def __init__(self):\n \"\"\"\n Calculates the running mean and standard deviation of a data stream. 
Modified version of\n https://github.com/DLR-RM/stable-baselines3/blob/a6f5049a99a4c21a6f0bcce458ca3306cef310e0/stable_baselines3/common/running_mean_std.py\n \"\"\"\n self.mean = 0\n self.std = 1\n self.var = 1\n self.count = 1e-24\n\n def update(self, xs: torch.Tensor) -> Tuple[float, float]:\n \"\"\"Updates running moments from batch's moments computed across ranks\"\"\"\n if dist.is_initialized():\n xs_mean, xs_var, xs_count = get_global_statistics(xs)\n else:\n xs_count = xs.numel()\n xs_var, xs_mean = torch.var_mean(xs, unbiased=False)\n\n delta = xs_mean - self.mean\n tot_count = self.count + xs_count\n\n new_sum = xs_var * xs_count\n # correct old_sum deviation accounting for the new mean\n old_sum = self.var * self.count + delta**2 * self.count * xs_count / tot_count\n tot_sum = old_sum + new_sum\n\n self.mean += delta * xs_count / tot_count\n self.var = tot_sum / tot_count\n self.std = (self.var * tot_count / (tot_count - 1)).sqrt()\n self.count = tot_count\n\n return xs_mean, (xs_var * xs_count / (xs_count - 1)).sqrt()\n\n\n# OpenDelta utilities\n\n\nMODIFIED_MODULES_DICT = {\n \"gptj\": {\n \"attention\": [\"attn.q_proj\", \"attn.k_proj\", \"attn.v_proj\"],\n \"mlp\": [\"mlp.fc_in\", \"mlp.fc_out\"],\n \"all\": [\n \"attn.q_proj\",\n \"attn.k_proj\",\n \"attn.v_proj\",\n \"attn.out_proj\",\n \"mlp.fc_in\",\n \"mlp.fc_out\",\n ],\n },\n \"gpt_neox\": {\n \"attention\": [\"attention.query_key_value\"],\n \"mlp\": [\"mlp.dense_h_to_4h\", \"mlp.dense_4h_to_h\"],\n \"all\": [\n \"attention.query_key_value\",\n \"attention.dense\",\n \"mlp.dense_h_to_4h\",\n \"mlp.dense_4h_to_h\",\n ],\n },\n \"opt\": {\n \"attention\": [\n \"self_attn.k_proj\",\n \"self_attn.v_proj\",\n \"self_attn.q_proj\",\n \"self_attn.out_proj\",\n ],\n \"mlp\": [\"fc1\", \"fc2\"],\n \"all\": [\n \"self_attn.k_proj\",\n \"self_attn.v_proj\",\n \"self_attn.q_proj\",\n \"self_attn.out_proj\",\n \"fc1\",\n \"fc2\",\n ],\n },\n \"bloom\": {\n \"attention\": [\"self_attention.query_key_value\", \"self_attention.dense\"],\n \"mlp\": [\"mlp.dense_h_to_4h\", \"mlp.dense_4h_to_h\"],\n \"all\": [\n \"self_attention.query_key_value\",\n \"self_attention.dense\",\n \"mlp.dense_h_to_4h\",\n \"mlp.dense_4h_to_h\",\n ],\n },\n \"t5\": {\n \"attention\": [\n \"layer.0.SelfAttention.q\",\n \"layer.0.SelfAttention.k\",\n \"layer.0.SelfAttention.v\",\n \"layer.0.SelfAttention.o\",\n \"layer.1.EncDecAttention.q\",\n \"layer.1.EncDecAttention.k\",\n \"layer.1.EncDecAttention.v\",\n \"layer.1.EncDecAttention.o\",\n ],\n \"mlp\": [\n \"layer.2.DenseReluDense.wo\",\n \"layer.2.DenseReluDense.wi_0\",\n \"layer.2.DenseReluDense.wi_1\",\n ],\n \"all\": [\n \"layer.0.SelfAttention.q\",\n \"layer.0.SelfAttention.k\",\n \"layer.0.SelfAttention.v\",\n \"layer.0.SelfAttention.o\",\n \"layer.1.EncDecAttention.q\",\n \"layer.1.EncDecAttention.k\",\n \"layer.1.EncDecAttention.v\",\n \"layer.1.EncDecAttention.o\",\n \"layer.2.DenseReluDense.wo\",\n \"layer.2.DenseReluDense.wi_0\",\n \"layer.2.DenseReluDense.wi_1\",\n ],\n },\n}\n\n\ndef generate_layer_regex(config: transformers.PretrainedConfig, num_layers_unfrozen: int = -1) -> str:\n \"\"\"Generates a regex range for the specified number of learnable layers.\"\"\"", "metadata": {"task_id": "CarperAI--trlx/21", "ground_truth": " if num_layers_unfrozen == -1:\n return \"(\\d)+.\"\n num_hidden_layers = hf_get_num_hidden_layers(config)\n start_layer = num_hidden_layers - num_layers_unfrozen\n if start_layer < 0:\n raise Exception(\"Number of layers unfrozen cannot be greater than number of layers in 
the model\")\n pattern = f\"(?:{regex_for_range(start_layer, num_hidden_layers - 1)}).\"\n return f\"{pattern}\"\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "modeling.py"], "context_start_lineno": 35, "lineno": 373, "function_name": "generate_layer_regex"}, "groundtruth": " if num_layers_unfrozen == -1:\n return \"(\\d)+.\"\n num_hidden_layers = hf_get_num_hidden_layers(config)\n start_layer = num_hidden_layers - num_layers_unfrozen\n if start_layer < 0:\n raise Exception(\"Number of layers unfrozen cannot be greater than number of layers in the model\")\n pattern = f\"(?:{regex_for_range(start_layer, num_hidden_layers - 1)}).\"\n return f\"{pattern}\"\n"} +{"prompt": "tokens\n encoder_blocks = model.encoder.block\n encoder_norm_layer = model.encoder.final_layer_norm\n decoder_norm_layer = model.decoder.final_layer_norm\n decoder_blocks = model.decoder.block[:-num_layers_unfrozen]\n blocks_to_freeze = (\n list(encoder_blocks)\n + list(decoder_blocks)\n + [shared_embed]\n + [encoder_norm_layer]\n + [decoder_norm_layer]\n + [decoder_embed]\n )\n for block in blocks_to_freeze:\n block.requires_grad_(False)\n\n\ndef rhasattr(obj, attr):\n \"\"\"A chain-able attribute version of hasattr. For example, to check if\n `obj` has the attribute `foo.bar.baz`, you can use:\n `rhasattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/67303315\n \"\"\"\n _nested_attrs = attr.split(\".\")\n _curr_obj = obj\n for _a in _nested_attrs[:-1]:\n if hasattr(_curr_obj, _a):\n _curr_obj = getattr(_curr_obj, _a)\n else:\n return False\n return hasattr(_curr_obj, _nested_attrs[-1])\n\n\ndef rgetattr(obj, attr: str, *args) -> object:\n \"\"\"A chain-able attribute version of getattr. For example, to get the\n attribute `foo.bar.baz` from `obj`, you can use:\n `rgetattr(obj, \"foo.bar.baz\")`\n Reference: https://stackoverflow.com/a/31174427\n \"\"\"\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split(\".\"))\n\n\ndef findattr(obj, attrs: Tuple[str]) -> Union[object, None]:\n for attr in attrs:\n if rhasattr(obj, attr):\n return rgetattr(obj, attr)\n raise ValueError(f\"Could not find an attribute from `{attrs}` in `{obj}`\")\n\n\ndef hf_get_decoder(model: nn.Module) -> nn.Module:\n \"\"\"Returns the causal decoder backbone of the specified HuggingFace transformers\n model.\n NOTE: Different model configurations have different causal decoder attribute\n names.\n - transformer: (GPT2LMHeadModel, GPTJConfig)\n - model.decoder: (OPTConfig, BloomConfig)\n - gpt_neox: (GPTNeoXConfig)\n \"\"\"\n decoder_attrs = (\"transformer\", \"model.decoder\", \"gpt_neox\", \"decoder\")\n return findattr(model, decoder_attrs)\n\n\ndef hf_get_decoder_final_norm(model: nn.Module) -> float:\n \"\"\"Returns the final (layer) norm of the specified decoder.\n NOTE: Different model configurations have different final norm attribute names.\n - transformer.ln_f: (GPT2LMHeadModel, GPTJForCausalLM)\n - model.decoder.final_layer_norm: (OPTForCausalLM)\n - gpt_neox.layers.final_layer_norm: (GPTNeoXForCausalLM)\n \"\"\"\n norm_attrs = (\n \"transformer.ln_f\",\n \"model.decoder.final_layer_norm\",\n \"decoder.final_layer_norm\",\n \"gpt_neox.final_layer_norm\",\n )\n return findattr(model, norm_attrs)\n\n\ndef hf_get_decoder_blocks(model: nn.Module) -> Tuple[nn.Module]:\n \"\"\"Returns the decoder hidden layers of the specified model.\n NOTE: Different model configurations have different hidden layer attribute names.\n - transformer.h: (BloomForCausalLM, 
GPT2LMHeadModel, GPTJForCausalLM)\n - model.decoder.layers: (OPTForCausalLM)\n - gpt_neox.layers: (GPTNeoXForCausalLM)\n - decoder.block: (T5ForConditionalGeneration)\n \"\"\"\n hidden_layers_attrs = (\n \"h\",\n \"layers\",\n \"decoder.layers\",\n \"transformer.h\",\n \"model.decoder.layers\",\n \"gpt_neox.layers\",\n \"decoder.block\",\n )\n return findattr(model, hidden_layers_attrs)\n\n\ndef hf_get_lm_head(model: nn.Module) -> nn.Module:\n \"\"\"Returns the language modeling (lm) head of the specified HuggingFace\n transformers model.\n NOTE: Different model configurations have different `lm_head` attribute names.\n - lm_head: (GPT2LMHeadModel, BloomForCausalLM)\n - embed_out: (GPTNeoXForCausalLM)\n \"\"\"\n return model.get_output_embeddings()\n\n\ndef hf_get_hidden_size(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the hidden layer dimensionality of the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different hidden size attribute names.\n - hidden_size: (OPTConfig, BloomConfig)\n - n_embd: (GPT2Config, GPTJConfig)\n - d_model: (PegasusConfig, XLNetConfig)\n \"\"\"\n hidden_size_attrs = (\"hidden_size\", \"n_embd\", \"d_model\")\n return findattr(config, hidden_size_attrs)\n\n\ndef hf_get_num_hidden_layers(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the number of hidden layers in the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different number-of-layers attribute\n names.\n - num_hidden_layers: (GPTNeoXConfig, OPTConfig)\n - n_layer: (GPT2Config, GPTJConfig, BloomConfig)\n \"\"\"\n num_hidden_layers_attrs = (\"num_hidden_layers\", \"n_layer\")\n return findattr(config, num_hidden_layers_attrs)\n\n\ndef get_global_statistics(xs: torch.Tensor) -> Tuple[float, float, int]:\n \"\"\"\n Computes element-wise mean and variance of the tensor across processes\n \"\"\"\n sum_and_count = torch.tensor([xs.sum(), xs.numel()], device=xs.device)\n dist.all_reduce(sum_and_count, dist.ReduceOp.SUM)\n global_sum, count = sum_and_count\n global_mean = global_sum / count\n\n sum_var = torch.sum((xs - global_mean) ** 2)\n dist.all_reduce(sum_var, dist.ReduceOp.SUM)\n global_var = sum_var / count\n return global_mean, global_var, count\n\n\ndef whiten(xs: torch.Tensor, shift_mean=True, distributed=True) -> torch.Tensor:\n \"\"\"Whitens values\"\"\"\n if distributed and dist.is_initialized():\n mean, var, _ = get_global_statistics(xs)\n else:\n var, mean = torch.var_mean(xs)\n\n whitened = (xs - mean) * torch.rsqrt(var + 1e-8)\n if not shift_mean:\n whitened += mean\n return whitened\n\n\ndef logprobs_of_labels(logits, labels):\n \"\"\"Log probabilities of the labels\n\n These are calculated from the logits.\"\"\"\n logprobs = F.log_softmax(logits, dim=-1)\n logprobs_labels = torch.gather(logprobs, dim=-1, index=labels.unsqueeze(-1))\n return logprobs_labels.squeeze(-1)\n\n\ndef flatten_dict(\n d: Union[dict, MutableMapping],\n parent_key: str = \"\",\n sep: str = \"/\",\n) -> dict:\n # From: https://stackoverflow.com/a/6027615\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten_dict(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\ndef get_tensor_stats(xs: torch.Tensor, mask: torch.Tensor, n: int):\n mean = (xs * mask).sum() / n\n return dict(\n mean=mean,\n min=torch.where(mask.bool(), xs, 
np.inf).min(),\n max=torch.where(mask.bool(), xs, -np.inf).max(),\n std=torch.sqrt(((xs - mean) * mask).pow(2).sum() / n),\n )\n\n\nclass RunningMoments:\n def __init__(self):\n \"\"\"\n Calculates the running mean and standard deviation of a data stream. Modified version of\n https://github.com/DLR-RM/stable-baselines3/blob/a6f5049a99a4c21a6f0bcce458ca3306cef310e0/stable_baselines3/common/running_mean_std.py\n \"\"\"\n self.mean = 0\n self.std = 1\n self.var = 1\n self.count = 1e-24\n\n def update(self, xs: torch.Tensor) -> Tuple[float, float]:\n \"\"\"Updates running moments from batch's moments computed across ranks\"\"\"\n if dist.is_initialized():\n xs_mean, xs_var, xs_count = get_global_statistics(xs)\n else:\n xs_count = xs.numel()\n xs_var, xs_mean = torch.var_mean(xs, unbiased=False)\n\n delta = xs_mean - self.mean\n tot_count = self.count + xs_count\n\n new_sum = xs_var * xs_count\n # correct old_sum deviation accounting for the new mean\n old_sum = self.var * self.count + delta**2 * self.count * xs_count / tot_count\n tot_sum = old_sum + new_sum\n\n self.mean += delta * xs_count / tot_count\n self.var = tot_sum / tot_count\n self.std = (self.var * tot_count / (tot_count - 1)).sqrt()\n self.count = tot_count\n\n return xs_mean, (xs_var * xs_count / (xs_count - 1)).sqrt()\n\n\n# OpenDelta utilities\n\n\nMODIFIED_MODULES_DICT = {\n \"gptj\": {\n \"attention\": [\"attn.q_proj\", \"attn.k_proj\", \"attn.v_proj\"],\n \"mlp\": [\"mlp.fc_in\", \"mlp.fc_out\"],\n \"all\": [\n \"attn.q_proj\",\n \"attn.k_proj\",\n \"attn.v_proj\",\n \"attn.out_proj\",\n \"mlp.fc_in\",\n \"mlp.fc_out\",\n ],\n },\n \"gpt_neox\": {\n \"attention\": [\"attention.query_key_value\"],\n \"mlp\": [\"mlp.dense_h_to_4h\", \"mlp.dense_4h_to_h\"],\n \"all\": [\n \"attention.query_key_value\",\n \"attention.dense\",\n \"mlp.dense_h_to_4h\",\n \"mlp.dense_4h_to_h\",\n ],\n },\n \"opt\": {\n \"attention\": [\n \"self_attn.k_proj\",\n \"self_attn.v_proj\",\n \"self_attn.q_proj\",\n \"self_attn.out_proj\",\n ],\n \"mlp\": [\"fc1\", \"fc2\"],\n \"all\": [\n \"self_attn.k_proj\",\n \"self_attn.v_proj\",\n \"self_attn.q_proj\",\n \"self_attn.out_proj\",\n \"fc1\",\n \"fc2\",\n ],\n },\n \"bloom\": {\n \"attention\": [\"self_attention.query_key_value\", \"self_attention.dense\"],\n \"mlp\": [\"mlp.dense_h_to_4h\", \"mlp.dense_4h_to_h\"],\n \"all\": [\n \"self_attention.query_key_value\",\n \"self_attention.dense\",\n \"mlp.dense_h_to_4h\",\n \"mlp.dense_4h_to_h\",\n ],\n },\n \"t5\": {\n \"attention\": [\n \"layer.0.SelfAttention.q\",\n \"layer.0.SelfAttention.k\",\n \"layer.0.SelfAttention.v\",\n \"layer.0.SelfAttention.o\",\n \"layer.1.EncDecAttention.q\",\n \"layer.1.EncDecAttention.k\",\n \"layer.1.EncDecAttention.v\",\n \"layer.1.EncDecAttention.o\",\n ],\n \"mlp\": [\n \"layer.2.DenseReluDense.wo\",\n \"layer.2.DenseReluDense.wi_0\",\n \"layer.2.DenseReluDense.wi_1\",\n ],\n \"all\": [\n \"layer.0.SelfAttention.q\",\n \"layer.0.SelfAttention.k\",\n \"layer.0.SelfAttention.v\",\n \"layer.0.SelfAttention.o\",\n \"layer.1.EncDecAttention.q\",\n \"layer.1.EncDecAttention.k\",\n \"layer.1.EncDecAttention.v\",\n \"layer.1.EncDecAttention.o\",\n \"layer.2.DenseReluDense.wo\",\n \"layer.2.DenseReluDense.wi_0\",\n \"layer.2.DenseReluDense.wi_1\",\n ],\n },\n}\n\n\ndef generate_layer_regex(config: transformers.PretrainedConfig, num_layers_unfrozen: int = -1) -> str:\n \"\"\"Generates a regex range for the specified number of learnable layers.\"\"\"\n if num_layers_unfrozen == -1:\n return \"(\\d)+.\"\n num_hidden_layers = 
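`get_tensor_stats` masks out padding positions (using ±inf sentinels for min/max) before reducing, with `n` the number of unmasked elements; a tiny sketch where the masked-out 100.0 is ignored:

import torch
from trlx.utils.modeling import get_tensor_stats

xs = torch.tensor([1.0, 2.0, 3.0, 100.0])
mask = torch.tensor([1.0, 1.0, 1.0, 0.0])
stats = get_tensor_stats(xs, mask, n=3)
assert float(stats["mean"]) == 2.0 and float(stats["min"]) == 1.0 and float(stats["max"]) == 3.0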
hf_get_num_hidden_layers(config)\n start_layer = num_hidden_layers - num_layers_unfrozen\n if start_layer < 0:\n raise Exception(\"Number of layers unfrozen cannot be greater than number of layers in the model\")\n pattern = f\"(?:{regex_for_range(start_layer, num_hidden_layers - 1)}).\"\n return f\"{pattern}\"\n\n\ndef get_delta_modified_modules(\n config: transformers.PretrainedConfig,\n modified_modules: List[str],\n num_layers_unfrozen: int = -1,\n) -> List[str]:\n \"\"\"Returns a list of module names to be modified for a given delta method with\n the specified number of learnable layers.\"\"\"", "metadata": {"task_id": "CarperAI--trlx/22", "ground_truth": " unfrozen_layers_pattern = generate_layer_regex(config, num_layers_unfrozen)\n\n # [r] for regex as per https://github.com/thunlp/OpenDelta/blob/main/opendelta/utils/name_based_addressing.py#L20\n regex_prefix = \"[r]\"\n # TODO (jon-tow): `decoder.block.` is hardcoded to support T5 layer naming.\n decoder_prefix = \"decoder.block.\" if config.is_encoder_decoder else \"\"\n module_list = [regex_prefix + decoder_prefix + unfrozen_layers_pattern + module for module in modified_modules]\n return module_list\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "modeling.py"], "context_start_lineno": 51, "lineno": 390, "function_name": "get_delta_modified_modules"}, "groundtruth": " unfrozen_layers_pattern = generate_layer_regex(config, num_layers_unfrozen)\n\n # [r] for regex as per https://github.com/thunlp/OpenDelta/blob/main/opendelta/utils/name_based_addressing.py#L20\n regex_prefix = \"[r]\"\n # TODO (jon-tow): `decoder.block.` is hardcoded to support T5 layer naming.\n decoder_prefix = \"decoder.block.\" if config.is_encoder_decoder else \"\"\n module_list = [regex_prefix + decoder_prefix + unfrozen_layers_pattern + module for module in modified_modules]\n return module_list\n"} +{"prompt": "):\n return rgetattr(obj, attr)\n raise ValueError(f\"Could not find an attribute from `{attrs}` in `{obj}`\")\n\n\ndef hf_get_decoder(model: nn.Module) -> nn.Module:\n \"\"\"Returns the causal decoder backbone of the specified HuggingFace transformers\n model.\n NOTE: Different model configurations have different causal decoder attribute\n names.\n - transformer: (GPT2LMHeadModel, GPTJConfig)\n - model.decoder: (OPTConfig, BloomConfig)\n - gpt_neox: (GPTNeoXConfig)\n \"\"\"\n decoder_attrs = (\"transformer\", \"model.decoder\", \"gpt_neox\", \"decoder\")\n return findattr(model, decoder_attrs)\n\n\ndef hf_get_decoder_final_norm(model: nn.Module) -> float:\n \"\"\"Returns the final (layer) norm of the specified decoder.\n NOTE: Different model configurations have different final norm attribute names.\n - transformer.ln_f: (GPT2LMHeadModel, GPTJForCausalLM)\n - model.decoder.final_layer_norm: (OPTForCausalLM)\n - gpt_neox.layers.final_layer_norm: (GPTNeoXForCausalLM)\n \"\"\"\n norm_attrs = (\n \"transformer.ln_f\",\n \"model.decoder.final_layer_norm\",\n \"decoder.final_layer_norm\",\n \"gpt_neox.final_layer_norm\",\n )\n return findattr(model, norm_attrs)\n\n\ndef hf_get_decoder_blocks(model: nn.Module) -> Tuple[nn.Module]:\n \"\"\"Returns the decoder hidden layers of the specified model.\n NOTE: Different model configurations have different hidden layer attribute names.\n - transformer.h: (BloomForCausalLM, GPT2LMHeadModel, GPTJForCausalLM)\n - model.decoder.layers: (OPTForCausalLM)\n - gpt_neox.layers: (GPTNeoXForCausalLM)\n - decoder.block: (T5ForConditionalGeneration)\n \"\"\"\n hidden_layers_attrs = (\n \"h\",\n \"layers\",\n 
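The module addresses produced by the `get_delta_modified_modules` ground truth are regex strings carrying OpenDelta's `[r]` marker; a sketch of their shape (the exact numeric range regex may differ):

from transformers import GPT2Config
from trlx.utils.modeling import get_delta_modified_modules

modules = get_delta_modified_modules(GPT2Config(n_layer=24), ["attn.q_proj"], num_layers_unfrozen=2)
print(modules)  # something like ['[r](?:2[2-3]).attn.q_proj']; encoder-decoder configs also get the "decoder.block." prefix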
\"decoder.layers\",\n \"transformer.h\",\n \"model.decoder.layers\",\n \"gpt_neox.layers\",\n \"decoder.block\",\n )\n return findattr(model, hidden_layers_attrs)\n\n\ndef hf_get_lm_head(model: nn.Module) -> nn.Module:\n \"\"\"Returns the language modeling (lm) head of the specified HuggingFace\n transformers model.\n NOTE: Different model configurations have different `lm_head` attribute names.\n - lm_head: (GPT2LMHeadModel, BloomForCausalLM)\n - embed_out: (GPTNeoXForCausalLM)\n \"\"\"\n return model.get_output_embeddings()\n\n\ndef hf_get_hidden_size(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the hidden layer dimensionality of the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different hidden size attribute names.\n - hidden_size: (OPTConfig, BloomConfig)\n - n_embd: (GPT2Config, GPTJConfig)\n - d_model: (PegasusConfig, XLNetConfig)\n \"\"\"\n hidden_size_attrs = (\"hidden_size\", \"n_embd\", \"d_model\")\n return findattr(config, hidden_size_attrs)\n\n\ndef hf_get_num_hidden_layers(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the number of hidden layers in the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different number-of-layers attribute\n names.\n - num_hidden_layers: (GPTNeoXConfig, OPTConfig)\n - n_layer: (GPT2Config, GPTJConfig, BloomConfig)\n \"\"\"\n num_hidden_layers_attrs = (\"num_hidden_layers\", \"n_layer\")\n return findattr(config, num_hidden_layers_attrs)\n\n\ndef get_global_statistics(xs: torch.Tensor) -> Tuple[float, float, int]:\n \"\"\"\n Computes element-wise mean and variance of the tensor across processes\n \"\"\"\n sum_and_count = torch.tensor([xs.sum(), xs.numel()], device=xs.device)\n dist.all_reduce(sum_and_count, dist.ReduceOp.SUM)\n global_sum, count = sum_and_count\n global_mean = global_sum / count\n\n sum_var = torch.sum((xs - global_mean) ** 2)\n dist.all_reduce(sum_var, dist.ReduceOp.SUM)\n global_var = sum_var / count\n return global_mean, global_var, count\n\n\ndef whiten(xs: torch.Tensor, shift_mean=True, distributed=True) -> torch.Tensor:\n \"\"\"Whitens values\"\"\"\n if distributed and dist.is_initialized():\n mean, var, _ = get_global_statistics(xs)\n else:\n var, mean = torch.var_mean(xs)\n\n whitened = (xs - mean) * torch.rsqrt(var + 1e-8)\n if not shift_mean:\n whitened += mean\n return whitened\n\n\ndef logprobs_of_labels(logits, labels):\n \"\"\"Log probabilities of the labels\n\n These are calculated from the logits.\"\"\"\n logprobs = F.log_softmax(logits, dim=-1)\n logprobs_labels = torch.gather(logprobs, dim=-1, index=labels.unsqueeze(-1))\n return logprobs_labels.squeeze(-1)\n\n\ndef flatten_dict(\n d: Union[dict, MutableMapping],\n parent_key: str = \"\",\n sep: str = \"/\",\n) -> dict:\n # From: https://stackoverflow.com/a/6027615\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten_dict(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\ndef get_tensor_stats(xs: torch.Tensor, mask: torch.Tensor, n: int):\n mean = (xs * mask).sum() / n\n return dict(\n mean=mean,\n min=torch.where(mask.bool(), xs, np.inf).min(),\n max=torch.where(mask.bool(), xs, -np.inf).max(),\n std=torch.sqrt(((xs - mean) * mask).pow(2).sum() / n),\n )\n\n\nclass RunningMoments:\n def __init__(self):\n \"\"\"\n Calculates the running mean and standard 
deviation of a data stream. Modified version of\n https://github.com/DLR-RM/stable-baselines3/blob/a6f5049a99a4c21a6f0bcce458ca3306cef310e0/stable_baselines3/common/running_mean_std.py\n \"\"\"\n self.mean = 0\n self.std = 1\n self.var = 1\n self.count = 1e-24\n\n def update(self, xs: torch.Tensor) -> Tuple[float, float]:\n \"\"\"Updates running moments from batch's moments computed across ranks\"\"\"\n if dist.is_initialized():\n xs_mean, xs_var, xs_count = get_global_statistics(xs)\n else:\n xs_count = xs.numel()\n xs_var, xs_mean = torch.var_mean(xs, unbiased=False)\n\n delta = xs_mean - self.mean\n tot_count = self.count + xs_count\n\n new_sum = xs_var * xs_count\n # correct old_sum deviation accounting for the new mean\n old_sum = self.var * self.count + delta**2 * self.count * xs_count / tot_count\n tot_sum = old_sum + new_sum\n\n self.mean += delta * xs_count / tot_count\n self.var = tot_sum / tot_count\n self.std = (self.var * tot_count / (tot_count - 1)).sqrt()\n self.count = tot_count\n\n return xs_mean, (xs_var * xs_count / (xs_count - 1)).sqrt()\n\n\n# OpenDelta utilities\n\n\nMODIFIED_MODULES_DICT = {\n \"gptj\": {\n \"attention\": [\"attn.q_proj\", \"attn.k_proj\", \"attn.v_proj\"],\n \"mlp\": [\"mlp.fc_in\", \"mlp.fc_out\"],\n \"all\": [\n \"attn.q_proj\",\n \"attn.k_proj\",\n \"attn.v_proj\",\n \"attn.out_proj\",\n \"mlp.fc_in\",\n \"mlp.fc_out\",\n ],\n },\n \"gpt_neox\": {\n \"attention\": [\"attention.query_key_value\"],\n \"mlp\": [\"mlp.dense_h_to_4h\", \"mlp.dense_4h_to_h\"],\n \"all\": [\n \"attention.query_key_value\",\n \"attention.dense\",\n \"mlp.dense_h_to_4h\",\n \"mlp.dense_4h_to_h\",\n ],\n },\n \"opt\": {\n \"attention\": [\n \"self_attn.k_proj\",\n \"self_attn.v_proj\",\n \"self_attn.q_proj\",\n \"self_attn.out_proj\",\n ],\n \"mlp\": [\"fc1\", \"fc2\"],\n \"all\": [\n \"self_attn.k_proj\",\n \"self_attn.v_proj\",\n \"self_attn.q_proj\",\n \"self_attn.out_proj\",\n \"fc1\",\n \"fc2\",\n ],\n },\n \"bloom\": {\n \"attention\": [\"self_attention.query_key_value\", \"self_attention.dense\"],\n \"mlp\": [\"mlp.dense_h_to_4h\", \"mlp.dense_4h_to_h\"],\n \"all\": [\n \"self_attention.query_key_value\",\n \"self_attention.dense\",\n \"mlp.dense_h_to_4h\",\n \"mlp.dense_4h_to_h\",\n ],\n },\n \"t5\": {\n \"attention\": [\n \"layer.0.SelfAttention.q\",\n \"layer.0.SelfAttention.k\",\n \"layer.0.SelfAttention.v\",\n \"layer.0.SelfAttention.o\",\n \"layer.1.EncDecAttention.q\",\n \"layer.1.EncDecAttention.k\",\n \"layer.1.EncDecAttention.v\",\n \"layer.1.EncDecAttention.o\",\n ],\n \"mlp\": [\n \"layer.2.DenseReluDense.wo\",\n \"layer.2.DenseReluDense.wi_0\",\n \"layer.2.DenseReluDense.wi_1\",\n ],\n \"all\": [\n \"layer.0.SelfAttention.q\",\n \"layer.0.SelfAttention.k\",\n \"layer.0.SelfAttention.v\",\n \"layer.0.SelfAttention.o\",\n \"layer.1.EncDecAttention.q\",\n \"layer.1.EncDecAttention.k\",\n \"layer.1.EncDecAttention.v\",\n \"layer.1.EncDecAttention.o\",\n \"layer.2.DenseReluDense.wo\",\n \"layer.2.DenseReluDense.wi_0\",\n \"layer.2.DenseReluDense.wi_1\",\n ],\n },\n}\n\n\ndef generate_layer_regex(config: transformers.PretrainedConfig, num_layers_unfrozen: int = -1) -> str:\n \"\"\"Generates a regex range for the specified number of learnable layers.\"\"\"\n if num_layers_unfrozen == -1:\n return \"(\\d)+.\"\n num_hidden_layers = hf_get_num_hidden_layers(config)\n start_layer = num_hidden_layers - num_layers_unfrozen\n if start_layer < 0:\n raise Exception(\"Number of layers unfrozen cannot be greater than number of layers in the model\")\n pattern = 
f\"(?:{regex_for_range(start_layer, num_hidden_layers - 1)}).\"\n return f\"{pattern}\"\n\n\ndef get_delta_modified_modules(\n config: transformers.PretrainedConfig,\n modified_modules: List[str],\n num_layers_unfrozen: int = -1,\n) -> List[str]:\n \"\"\"Returns a list of module names to be modified for a given delta method with\n the specified number of learnable layers.\"\"\"\n unfrozen_layers_pattern = generate_layer_regex(config, num_layers_unfrozen)\n\n # [r] for regex as per https://github.com/thunlp/OpenDelta/blob/main/opendelta/utils/name_based_addressing.py#L20\n regex_prefix = \"[r]\"\n # TODO (jon-tow): `decoder.block.` is hardcoded to support T5 layer naming.\n decoder_prefix = \"decoder.block.\" if config.is_encoder_decoder else \"\"\n module_list = [regex_prefix + decoder_prefix + unfrozen_layers_pattern + module for module in modified_modules]\n return module_list\n\n\ndef get_delta_model_class(model_type: str):\n if not HAS_OPENDELTA:\n raise ValueError(\"OpenDelta package required to train with delta models. https://github.com/thunlp/OpenDelta.\")\n delta_models = {\n \"bitfit\": BitFitModel,\n \"adapter\": AdapterModel,\n \"prefix\": PrefixModel,\n \"lora\": LoraModel,\n \"softprompt\": SoftPromptModel,\n }\n return delta_models[model_type]\n\n\ndef parse_delta_kwargs(\n config: transformers.PretrainedConfig,\n delta_kwargs: Dict[str, Any],\n num_layers_unfrozen: int = -1,\n) -> Tuple[str, Dict[str, Any]]:\n \"\"\"Parses through delta kwargs to get delta type and proper modified modules.\"\"\"\n # This function is needed to parse through the `delta_kwargs` in order to:\n # 1) Get the `delta_type` method name to access the correct `delta_model_class`\n # 2a) Accept user specified `modified_modules` and if not provided use the `trlx` default mapping\n # 2b) Convert the list of `modified_modules` to a range of layers that fit within the range\n # of learnable layers as specified by `num_layers_unfrozen`\n\n # Pop `delta_type` to allow passing the kwargs to the model constructor since\n # `delta_type` is not a valid argument of the constructor", "metadata": {"task_id": "CarperAI--trlx/23", "ground_truth": " delta_type = delta_kwargs.pop(\"delta_type\")\n assert delta_type in [\"lora\"], \"Only `LoRA` based delta models are supported\"\n\n # Use `trlx` default modified modules if none are specified\n modified_modules = delta_kwargs.get(\"modified_modules\", \"all\")\n if modified_modules in [\"all\", \"attention\", \"mlp\"]:\n if config.model_type not in MODIFIED_MODULES_DICT:\n raise ValueError(\n f\"Model type `{config.model_type}` is not currently supported for \"\n \"delta training with default modified modules.\"\n )\n modified_modules = MODIFIED_MODULES_DICT[config.model_type][modified_modules]\n # Update the `modified_modules` with the correct layer ranges\n delta_kwargs[\"modified_modules\"] = get_delta_modified_modules(\n config, modified_modules, num_layers_unfrozen=num_layers_unfrozen\n )\n\n return delta_type, delta_kwargs\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "modeling.py"], "context_start_lineno": 99, "lineno": 427, "function_name": "parse_delta_kwargs"}, "groundtruth": " delta_type = delta_kwargs.pop(\"delta_type\")\n assert delta_type in [\"lora\"], \"Only `LoRA` based delta models are supported\"\n\n # Use `trlx` default modified modules if none are specified\n modified_modules = delta_kwargs.get(\"modified_modules\", \"all\")\n if modified_modules in [\"all\", \"attention\", \"mlp\"]:\n if config.model_type not in MODIFIED_MODULES_DICT:\n 
raise ValueError(\n f\"Model type `{config.model_type}` is not currently supported for \"\n \"delta training with default modified modules.\"\n )\n modified_modules = MODIFIED_MODULES_DICT[config.model_type][modified_modules]\n # Update the `modified_modules` with the correct layer ranges\n delta_kwargs[\"modified_modules\"] = get_delta_modified_modules(\n config, modified_modules, num_layers_unfrozen=num_layers_unfrozen\n )\n\n return delta_type, delta_kwargs\n"} +{"prompt": ".final_layer_norm\",\n )\n return findattr(model, norm_attrs)\n\n\ndef hf_get_decoder_blocks(model: nn.Module) -> Tuple[nn.Module]:\n \"\"\"Returns the decoder hidden layers of the specified model.\n NOTE: Different model configurations have different hidden layer attribute names.\n - transformer.h: (BloomForCausalLM, GPT2LMHeadModel, GPTJForCausalLM)\n - model.decoder.layers: (OPTForCausalLM)\n - gpt_neox.layers: (GPTNeoXForCausalLM)\n - decoder.block: (T5ForConditionalGeneration)\n \"\"\"\n hidden_layers_attrs = (\n \"h\",\n \"layers\",\n \"decoder.layers\",\n \"transformer.h\",\n \"model.decoder.layers\",\n \"gpt_neox.layers\",\n \"decoder.block\",\n )\n return findattr(model, hidden_layers_attrs)\n\n\ndef hf_get_lm_head(model: nn.Module) -> nn.Module:\n \"\"\"Returns the language modeling (lm) head of the specified HuggingFace\n transformers model.\n NOTE: Different model configurations have different `lm_head` attribute names.\n - lm_head: (GPT2LMHeadModel, BloomForCausalLM)\n - embed_out: (GPTNeoXForCausalLM)\n \"\"\"\n return model.get_output_embeddings()\n\n\ndef hf_get_hidden_size(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the hidden layer dimensionality of the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different hidden size attribute names.\n - hidden_size: (OPTConfig, BloomConfig)\n - n_embd: (GPT2Config, GPTJConfig)\n - d_model: (PegasusConfig, XLNetConfig)\n \"\"\"\n hidden_size_attrs = (\"hidden_size\", \"n_embd\", \"d_model\")\n return findattr(config, hidden_size_attrs)\n\n\ndef hf_get_num_hidden_layers(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the number of hidden layers in the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different number-of-layers attribute\n names.\n - num_hidden_layers: (GPTNeoXConfig, OPTConfig)\n - n_layer: (GPT2Config, GPTJConfig, BloomConfig)\n \"\"\"\n num_hidden_layers_attrs = (\"num_hidden_layers\", \"n_layer\")\n return findattr(config, num_hidden_layers_attrs)\n\n\ndef get_global_statistics(xs: torch.Tensor) -> Tuple[float, float, int]:\n \"\"\"\n Computes element-wise mean and variance of the tensor across processes\n \"\"\"\n sum_and_count = torch.tensor([xs.sum(), xs.numel()], device=xs.device)\n dist.all_reduce(sum_and_count, dist.ReduceOp.SUM)\n global_sum, count = sum_and_count\n global_mean = global_sum / count\n\n sum_var = torch.sum((xs - global_mean) ** 2)\n dist.all_reduce(sum_var, dist.ReduceOp.SUM)\n global_var = sum_var / count\n return global_mean, global_var, count\n\n\ndef whiten(xs: torch.Tensor, shift_mean=True, distributed=True) -> torch.Tensor:\n \"\"\"Whitens values\"\"\"\n if distributed and dist.is_initialized():\n mean, var, _ = get_global_statistics(xs)\n else:\n var, mean = torch.var_mean(xs)\n\n whitened = (xs - mean) * torch.rsqrt(var + 1e-8)\n if not shift_mean:\n whitened += mean\n return whitened\n\n\ndef logprobs_of_labels(logits, labels):\n \"\"\"Log 
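End to end, `parse_delta_kwargs` pops `delta_type` (only "lora" passes the assert), expands a preset such as "attention" through MODIFIED_MODULES_DICT, and rewrites `modified_modules` in place; a sketch with a GPT-J config, whose `model_type` is one of the supported keys (plain GPT-2, for instance, is not):

from transformers import GPTJConfig
from trlx.utils.modeling import parse_delta_kwargs

delta_type, kwargs = parse_delta_kwargs(
    GPTJConfig(n_layer=28),
    {"delta_type": "lora", "modified_modules": "attention", "lora_r": 8},
    num_layers_unfrozen=2,
)
assert delta_type == "lora" and "delta_type" not in kwargs
assert all(m.startswith("[r]") for m in kwargs["modified_modules"])  # attn.q/k/v_proj on the last two layers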
probabilities of the labels\n\n These are calculated from the logits.\"\"\"\n logprobs = F.log_softmax(logits, dim=-1)\n logprobs_labels = torch.gather(logprobs, dim=-1, index=labels.unsqueeze(-1))\n return logprobs_labels.squeeze(-1)\n\n\ndef flatten_dict(\n d: Union[dict, MutableMapping],\n parent_key: str = \"\",\n sep: str = \"/\",\n) -> dict:\n # From: https://stackoverflow.com/a/6027615\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten_dict(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\ndef get_tensor_stats(xs: torch.Tensor, mask: torch.Tensor, n: int):\n mean = (xs * mask).sum() / n\n return dict(\n mean=mean,\n min=torch.where(mask.bool(), xs, np.inf).min(),\n max=torch.where(mask.bool(), xs, -np.inf).max(),\n std=torch.sqrt(((xs - mean) * mask).pow(2).sum() / n),\n )\n\n\nclass RunningMoments:\n def __init__(self):\n \"\"\"\n Calculates the running mean and standard deviation of a data stream. Modified version of\n https://github.com/DLR-RM/stable-baselines3/blob/a6f5049a99a4c21a6f0bcce458ca3306cef310e0/stable_baselines3/common/running_mean_std.py\n \"\"\"\n self.mean = 0\n self.std = 1\n self.var = 1\n self.count = 1e-24\n\n def update(self, xs: torch.Tensor) -> Tuple[float, float]:\n \"\"\"Updates running moments from batch's moments computed across ranks\"\"\"\n if dist.is_initialized():\n xs_mean, xs_var, xs_count = get_global_statistics(xs)\n else:\n xs_count = xs.numel()\n xs_var, xs_mean = torch.var_mean(xs, unbiased=False)\n\n delta = xs_mean - self.mean\n tot_count = self.count + xs_count\n\n new_sum = xs_var * xs_count\n # correct old_sum deviation accounting for the new mean\n old_sum = self.var * self.count + delta**2 * self.count * xs_count / tot_count\n tot_sum = old_sum + new_sum\n\n self.mean += delta * xs_count / tot_count\n self.var = tot_sum / tot_count\n self.std = (self.var * tot_count / (tot_count - 1)).sqrt()\n self.count = tot_count\n\n return xs_mean, (xs_var * xs_count / (xs_count - 1)).sqrt()\n\n\n# OpenDelta utilities\n\n\nMODIFIED_MODULES_DICT = {\n \"gptj\": {\n \"attention\": [\"attn.q_proj\", \"attn.k_proj\", \"attn.v_proj\"],\n \"mlp\": [\"mlp.fc_in\", \"mlp.fc_out\"],\n \"all\": [\n \"attn.q_proj\",\n \"attn.k_proj\",\n \"attn.v_proj\",\n \"attn.out_proj\",\n \"mlp.fc_in\",\n \"mlp.fc_out\",\n ],\n },\n \"gpt_neox\": {\n \"attention\": [\"attention.query_key_value\"],\n \"mlp\": [\"mlp.dense_h_to_4h\", \"mlp.dense_4h_to_h\"],\n \"all\": [\n \"attention.query_key_value\",\n \"attention.dense\",\n \"mlp.dense_h_to_4h\",\n \"mlp.dense_4h_to_h\",\n ],\n },\n \"opt\": {\n \"attention\": [\n \"self_attn.k_proj\",\n \"self_attn.v_proj\",\n \"self_attn.q_proj\",\n \"self_attn.out_proj\",\n ],\n \"mlp\": [\"fc1\", \"fc2\"],\n \"all\": [\n \"self_attn.k_proj\",\n \"self_attn.v_proj\",\n \"self_attn.q_proj\",\n \"self_attn.out_proj\",\n \"fc1\",\n \"fc2\",\n ],\n },\n \"bloom\": {\n \"attention\": [\"self_attention.query_key_value\", \"self_attention.dense\"],\n \"mlp\": [\"mlp.dense_h_to_4h\", \"mlp.dense_4h_to_h\"],\n \"all\": [\n \"self_attention.query_key_value\",\n \"self_attention.dense\",\n \"mlp.dense_h_to_4h\",\n \"mlp.dense_4h_to_h\",\n ],\n },\n \"t5\": {\n \"attention\": [\n \"layer.0.SelfAttention.q\",\n \"layer.0.SelfAttention.k\",\n \"layer.0.SelfAttention.v\",\n \"layer.0.SelfAttention.o\",\n \"layer.1.EncDecAttention.q\",\n \"layer.1.EncDecAttention.k\",\n \"layer.1.EncDecAttention.v\",\n 
\"layer.1.EncDecAttention.o\",\n ],\n \"mlp\": [\n \"layer.2.DenseReluDense.wo\",\n \"layer.2.DenseReluDense.wi_0\",\n \"layer.2.DenseReluDense.wi_1\",\n ],\n \"all\": [\n \"layer.0.SelfAttention.q\",\n \"layer.0.SelfAttention.k\",\n \"layer.0.SelfAttention.v\",\n \"layer.0.SelfAttention.o\",\n \"layer.1.EncDecAttention.q\",\n \"layer.1.EncDecAttention.k\",\n \"layer.1.EncDecAttention.v\",\n \"layer.1.EncDecAttention.o\",\n \"layer.2.DenseReluDense.wo\",\n \"layer.2.DenseReluDense.wi_0\",\n \"layer.2.DenseReluDense.wi_1\",\n ],\n },\n}\n\n\ndef generate_layer_regex(config: transformers.PretrainedConfig, num_layers_unfrozen: int = -1) -> str:\n \"\"\"Generates a regex range for the specified number of learnable layers.\"\"\"\n if num_layers_unfrozen == -1:\n return \"(\\d)+.\"\n num_hidden_layers = hf_get_num_hidden_layers(config)\n start_layer = num_hidden_layers - num_layers_unfrozen\n if start_layer < 0:\n raise Exception(\"Number of layers unfrozen cannot be greater than number of layers in the model\")\n pattern = f\"(?:{regex_for_range(start_layer, num_hidden_layers - 1)}).\"\n return f\"{pattern}\"\n\n\ndef get_delta_modified_modules(\n config: transformers.PretrainedConfig,\n modified_modules: List[str],\n num_layers_unfrozen: int = -1,\n) -> List[str]:\n \"\"\"Returns a list of module names to be modified for a given delta method with\n the specified number of learnable layers.\"\"\"\n unfrozen_layers_pattern = generate_layer_regex(config, num_layers_unfrozen)\n\n # [r] for regex as per https://github.com/thunlp/OpenDelta/blob/main/opendelta/utils/name_based_addressing.py#L20\n regex_prefix = \"[r]\"\n # TODO (jon-tow): `decoder.block.` is hardcoded to support T5 layer naming.\n decoder_prefix = \"decoder.block.\" if config.is_encoder_decoder else \"\"\n module_list = [regex_prefix + decoder_prefix + unfrozen_layers_pattern + module for module in modified_modules]\n return module_list\n\n\ndef get_delta_model_class(model_type: str):\n if not HAS_OPENDELTA:\n raise ValueError(\"OpenDelta package required to train with delta models. 
https://github.com/thunlp/OpenDelta.\")\n delta_models = {\n \"bitfit\": BitFitModel,\n \"adapter\": AdapterModel,\n \"prefix\": PrefixModel,\n \"lora\": LoraModel,\n \"softprompt\": SoftPromptModel,\n }\n return delta_models[model_type]\n\n\ndef parse_delta_kwargs(\n config: transformers.PretrainedConfig,\n delta_kwargs: Dict[str, Any],\n num_layers_unfrozen: int = -1,\n) -> Tuple[str, Dict[str, Any]]:\n \"\"\"Parses through delta kwargs to get delta type and proper modified modules.\"\"\"\n # This function is needed to parse through the `delta_kwargs` in order to:\n # 1) Get the `delta_type` method name to access the correct `delta_model_class`\n # 2a) Accept user specified `modified_modules` and if not provided use the `trlx` default mapping\n # 2b) Convert the list of `modified_modules` to a range of layers that fit within the range\n # of learnable layers as specified by `num_layers_unfrozen`\n\n # Pop `delta_type` to allow passing the kwargs to the model constructor since\n # `delta_type` is not a valid argument of the constructor\n delta_type = delta_kwargs.pop(\"delta_type\")\n assert delta_type in [\"lora\"], \"Only `LoRA` based delta models are supported\"\n\n # Use `trlx` default modified modules if none are specified\n modified_modules = delta_kwargs.get(\"modified_modules\", \"all\")\n if modified_modules in [\"all\", \"attention\", \"mlp\"]:\n if config.model_type not in MODIFIED_MODULES_DICT:\n raise ValueError(\n f\"Model type `{config.model_type}` is not currently supported for \"\n \"delta training with default modified modules.\"\n )\n modified_modules = MODIFIED_MODULES_DICT[config.model_type][modified_modules]\n # Update the `modified_modules` with the correct layer ranges\n delta_kwargs[\"modified_modules\"] = get_delta_modified_modules(\n config, modified_modules, num_layers_unfrozen=num_layers_unfrozen\n )\n\n return delta_type, delta_kwargs\n\n\ndef regex_for_range(min_: int, max_: int) -> str: # noqa\n \"\"\"Returns a regex that matches all numbers in the given range.\n\n Example: regex_for_range(12, 34) -> \"1[2-9]|2\\d|3[0-4]\"\n\n Copyright (c) 2013, Dmitry Voronin. 
All rights reserved.\n Reference: https://github.com/voronind/range-regex\n \"\"\"\n\n def split_to_patterns(min_, max_):", "metadata": {"task_id": "CarperAI--trlx/24", "ground_truth": " subpatterns = []\n start = min_\n for stop in split_to_ranges(min_, max_):\n subpatterns.append(range_to_pattern(start, stop))\n start = stop + 1\n return subpatterns\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "modeling.py"], "context_start_lineno": 128, "lineno": 457, "function_name": "split_to_patterns"}, "groundtruth": " subpatterns = []\n start = min_\n for stop in split_to_ranges(min_, max_):\n subpatterns.append(range_to_pattern(start, stop))\n start = stop + 1\n return subpatterns\n"} +{"prompt": ".\n - transformer.h: (BloomForCausalLM, GPT2LMHeadModel, GPTJForCausalLM)\n - model.decoder.layers: (OPTForCausalLM)\n - gpt_neox.layers: (GPTNeoXForCausalLM)\n - decoder.block: (T5ForConditionalGeneration)\n \"\"\"\n hidden_layers_attrs = (\n \"h\",\n \"layers\",\n \"decoder.layers\",\n \"transformer.h\",\n \"model.decoder.layers\",\n \"gpt_neox.layers\",\n \"decoder.block\",\n )\n return findattr(model, hidden_layers_attrs)\n\n\ndef hf_get_lm_head(model: nn.Module) -> nn.Module:\n \"\"\"Returns the language modeling (lm) head of the specified HuggingFace\n transformers model.\n NOTE: Different model configurations have different `lm_head` attribute names.\n - lm_head: (GPT2LMHeadModel, BloomForCausalLM)\n - embed_out: (GPTNeoXForCausalLM)\n \"\"\"\n return model.get_output_embeddings()\n\n\ndef hf_get_hidden_size(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the hidden layer dimensionality of the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different hidden size attribute names.\n - hidden_size: (OPTConfig, BloomConfig)\n - n_embd: (GPT2Config, GPTJConfig)\n - d_model: (PegasusConfig, XLNetConfig)\n \"\"\"\n hidden_size_attrs = (\"hidden_size\", \"n_embd\", \"d_model\")\n return findattr(config, hidden_size_attrs)\n\n\ndef hf_get_num_hidden_layers(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the number of hidden layers in the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different number-of-layers attribute\n names.\n - num_hidden_layers: (GPTNeoXConfig, OPTConfig)\n - n_layer: (GPT2Config, GPTJConfig, BloomConfig)\n \"\"\"\n num_hidden_layers_attrs = (\"num_hidden_layers\", \"n_layer\")\n return findattr(config, num_hidden_layers_attrs)\n\n\ndef get_global_statistics(xs: torch.Tensor) -> Tuple[float, float, int]:\n \"\"\"\n Computes element-wise mean and variance of the tensor across processes\n \"\"\"\n sum_and_count = torch.tensor([xs.sum(), xs.numel()], device=xs.device)\n dist.all_reduce(sum_and_count, dist.ReduceOp.SUM)\n global_sum, count = sum_and_count\n global_mean = global_sum / count\n\n sum_var = torch.sum((xs - global_mean) ** 2)\n dist.all_reduce(sum_var, dist.ReduceOp.SUM)\n global_var = sum_var / count\n return global_mean, global_var, count\n\n\ndef whiten(xs: torch.Tensor, shift_mean=True, distributed=True) -> torch.Tensor:\n \"\"\"Whitens values\"\"\"\n if distributed and dist.is_initialized():\n mean, var, _ = get_global_statistics(xs)\n else:\n var, mean = torch.var_mean(xs)\n\n whitened = (xs - mean) * torch.rsqrt(var + 1e-8)\n if not shift_mean:\n whitened += mean\n return whitened\n\n\ndef logprobs_of_labels(logits, labels):\n \"\"\"Log probabilities of the labels\n\n These are calculated from the 
logits.\"\"\"\n logprobs = F.log_softmax(logits, dim=-1)\n logprobs_labels = torch.gather(logprobs, dim=-1, index=labels.unsqueeze(-1))\n return logprobs_labels.squeeze(-1)\n\n\ndef flatten_dict(\n d: Union[dict, MutableMapping],\n parent_key: str = \"\",\n sep: str = \"/\",\n) -> dict:\n # From: https://stackoverflow.com/a/6027615\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten_dict(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\ndef get_tensor_stats(xs: torch.Tensor, mask: torch.Tensor, n: int):\n mean = (xs * mask).sum() / n\n return dict(\n mean=mean,\n min=torch.where(mask.bool(), xs, np.inf).min(),\n max=torch.where(mask.bool(), xs, -np.inf).max(),\n std=torch.sqrt(((xs - mean) * mask).pow(2).sum() / n),\n )\n\n\nclass RunningMoments:\n def __init__(self):\n \"\"\"\n Calculates the running mean and standard deviation of a data stream. Modified version of\n https://github.com/DLR-RM/stable-baselines3/blob/a6f5049a99a4c21a6f0bcce458ca3306cef310e0/stable_baselines3/common/running_mean_std.py\n \"\"\"\n self.mean = 0\n self.std = 1\n self.var = 1\n self.count = 1e-24\n\n def update(self, xs: torch.Tensor) -> Tuple[float, float]:\n \"\"\"Updates running moments from batch's moments computed across ranks\"\"\"\n if dist.is_initialized():\n xs_mean, xs_var, xs_count = get_global_statistics(xs)\n else:\n xs_count = xs.numel()\n xs_var, xs_mean = torch.var_mean(xs, unbiased=False)\n\n delta = xs_mean - self.mean\n tot_count = self.count + xs_count\n\n new_sum = xs_var * xs_count\n # correct old_sum deviation accounting for the new mean\n old_sum = self.var * self.count + delta**2 * self.count * xs_count / tot_count\n tot_sum = old_sum + new_sum\n\n self.mean += delta * xs_count / tot_count\n self.var = tot_sum / tot_count\n self.std = (self.var * tot_count / (tot_count - 1)).sqrt()\n self.count = tot_count\n\n return xs_mean, (xs_var * xs_count / (xs_count - 1)).sqrt()\n\n\n# OpenDelta utilities\n\n\nMODIFIED_MODULES_DICT = {\n \"gptj\": {\n \"attention\": [\"attn.q_proj\", \"attn.k_proj\", \"attn.v_proj\"],\n \"mlp\": [\"mlp.fc_in\", \"mlp.fc_out\"],\n \"all\": [\n \"attn.q_proj\",\n \"attn.k_proj\",\n \"attn.v_proj\",\n \"attn.out_proj\",\n \"mlp.fc_in\",\n \"mlp.fc_out\",\n ],\n },\n \"gpt_neox\": {\n \"attention\": [\"attention.query_key_value\"],\n \"mlp\": [\"mlp.dense_h_to_4h\", \"mlp.dense_4h_to_h\"],\n \"all\": [\n \"attention.query_key_value\",\n \"attention.dense\",\n \"mlp.dense_h_to_4h\",\n \"mlp.dense_4h_to_h\",\n ],\n },\n \"opt\": {\n \"attention\": [\n \"self_attn.k_proj\",\n \"self_attn.v_proj\",\n \"self_attn.q_proj\",\n \"self_attn.out_proj\",\n ],\n \"mlp\": [\"fc1\", \"fc2\"],\n \"all\": [\n \"self_attn.k_proj\",\n \"self_attn.v_proj\",\n \"self_attn.q_proj\",\n \"self_attn.out_proj\",\n \"fc1\",\n \"fc2\",\n ],\n },\n \"bloom\": {\n \"attention\": [\"self_attention.query_key_value\", \"self_attention.dense\"],\n \"mlp\": [\"mlp.dense_h_to_4h\", \"mlp.dense_4h_to_h\"],\n \"all\": [\n \"self_attention.query_key_value\",\n \"self_attention.dense\",\n \"mlp.dense_h_to_4h\",\n \"mlp.dense_4h_to_h\",\n ],\n },\n \"t5\": {\n \"attention\": [\n \"layer.0.SelfAttention.q\",\n \"layer.0.SelfAttention.k\",\n \"layer.0.SelfAttention.v\",\n \"layer.0.SelfAttention.o\",\n \"layer.1.EncDecAttention.q\",\n \"layer.1.EncDecAttention.k\",\n \"layer.1.EncDecAttention.v\",\n \"layer.1.EncDecAttention.o\",\n ],\n \"mlp\": [\n 
\"layer.2.DenseReluDense.wo\",\n \"layer.2.DenseReluDense.wi_0\",\n \"layer.2.DenseReluDense.wi_1\",\n ],\n \"all\": [\n \"layer.0.SelfAttention.q\",\n \"layer.0.SelfAttention.k\",\n \"layer.0.SelfAttention.v\",\n \"layer.0.SelfAttention.o\",\n \"layer.1.EncDecAttention.q\",\n \"layer.1.EncDecAttention.k\",\n \"layer.1.EncDecAttention.v\",\n \"layer.1.EncDecAttention.o\",\n \"layer.2.DenseReluDense.wo\",\n \"layer.2.DenseReluDense.wi_0\",\n \"layer.2.DenseReluDense.wi_1\",\n ],\n },\n}\n\n\ndef generate_layer_regex(config: transformers.PretrainedConfig, num_layers_unfrozen: int = -1) -> str:\n \"\"\"Generates a regex range for the specified number of learnable layers.\"\"\"\n if num_layers_unfrozen == -1:\n return \"(\\d)+.\"\n num_hidden_layers = hf_get_num_hidden_layers(config)\n start_layer = num_hidden_layers - num_layers_unfrozen\n if start_layer < 0:\n raise Exception(\"Number of layers unfrozen cannot be greater than number of layers in the model\")\n pattern = f\"(?:{regex_for_range(start_layer, num_hidden_layers - 1)}).\"\n return f\"{pattern}\"\n\n\ndef get_delta_modified_modules(\n config: transformers.PretrainedConfig,\n modified_modules: List[str],\n num_layers_unfrozen: int = -1,\n) -> List[str]:\n \"\"\"Returns a list of module names to be modified for a given delta method with\n the specified number of learnable layers.\"\"\"\n unfrozen_layers_pattern = generate_layer_regex(config, num_layers_unfrozen)\n\n # [r] for regex as per https://github.com/thunlp/OpenDelta/blob/main/opendelta/utils/name_based_addressing.py#L20\n regex_prefix = \"[r]\"\n # TODO (jon-tow): `decoder.block.` is hardcoded to support T5 layer naming.\n decoder_prefix = \"decoder.block.\" if config.is_encoder_decoder else \"\"\n module_list = [regex_prefix + decoder_prefix + unfrozen_layers_pattern + module for module in modified_modules]\n return module_list\n\n\ndef get_delta_model_class(model_type: str):\n if not HAS_OPENDELTA:\n raise ValueError(\"OpenDelta package required to train with delta models. 
https://github.com/thunlp/OpenDelta.\")\n delta_models = {\n \"bitfit\": BitFitModel,\n \"adapter\": AdapterModel,\n \"prefix\": PrefixModel,\n \"lora\": LoraModel,\n \"softprompt\": SoftPromptModel,\n }\n return delta_models[model_type]\n\n\ndef parse_delta_kwargs(\n config: transformers.PretrainedConfig,\n delta_kwargs: Dict[str, Any],\n num_layers_unfrozen: int = -1,\n) -> Tuple[str, Dict[str, Any]]:\n \"\"\"Parses through delta kwargs to get delta type and proper modified modules.\"\"\"\n # This function is needed to parse through the `delta_kwargs` in order to:\n # 1) Get the `delta_type` method name to access the correct `delta_model_class`\n # 2a) Accept user specified `modified_modules` and if not provided use the `trlx` default mapping\n # 2b) Convert the list of `modified_modules` to a range of layers that fit within the range\n # of learnable layers as specified by `num_layers_unfrozen`\n\n # Pop `delta_type` to allow passing the kwargs to the model constructor since\n # `delta_type` is not a valid argument of the constructor\n delta_type = delta_kwargs.pop(\"delta_type\")\n assert delta_type in [\"lora\"], \"Only `LoRA` based delta models are supported\"\n\n # Use `trlx` default modified modules if none are specified\n modified_modules = delta_kwargs.get(\"modified_modules\", \"all\")\n if modified_modules in [\"all\", \"attention\", \"mlp\"]:\n if config.model_type not in MODIFIED_MODULES_DICT:\n raise ValueError(\n f\"Model type `{config.model_type}` is not currently supported for \"\n \"delta training with default modified modules.\"\n )\n modified_modules = MODIFIED_MODULES_DICT[config.model_type][modified_modules]\n # Update the `modified_modules` with the correct layer ranges\n delta_kwargs[\"modified_modules\"] = get_delta_modified_modules(\n config, modified_modules, num_layers_unfrozen=num_layers_unfrozen\n )\n\n return delta_type, delta_kwargs\n\n\ndef regex_for_range(min_: int, max_: int) -> str: # noqa\n \"\"\"Returns a regex that matches all numbers in the given range.\n\n Example: regex_for_range(12, 34) -> \"1[2-9]|2\\d|3[0-4]\"\n\n Copyright (c) 2013, Dmitry Voronin. 
All rights reserved.\n Reference: https://github.com/voronind/range-regex\n \"\"\"\n\n def split_to_patterns(min_, max_):\n subpatterns = []\n start = min_\n for stop in split_to_ranges(min_, max_):\n subpatterns.append(range_to_pattern(start, stop))\n start = stop + 1\n return subpatterns\n\n def split_to_ranges(min_, max_):", "metadata": {"task_id": "CarperAI--trlx/25", "ground_truth": " stops = {max_}\n nines_count = 1\n stop = fill_by_nines(min_, nines_count)\n while min_ <= stop < max_:\n stops.add(stop)\n nines_count += 1\n stop = fill_by_nines(min_, nines_count)\n zeros_count = 1\n stop = fill_by_zeros(max_ + 1, zeros_count) - 1\n while min_ < stop <= max_:\n stops.add(stop)\n zeros_count += 1\n stop = fill_by_zeros(max_ + 1, zeros_count) - 1\n stops = list(stops)\n stops.sort()\n return stops\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "modeling.py"], "context_start_lineno": 135, "lineno": 465, "function_name": "split_to_ranges"}, "groundtruth": " stops = {max_}\n nines_count = 1\n stop = fill_by_nines(min_, nines_count)\n while min_ <= stop < max_:\n stops.add(stop)\n nines_count += 1\n stop = fill_by_nines(min_, nines_count)\n zeros_count = 1\n stop = fill_by_zeros(max_ + 1, zeros_count) - 1\n while min_ < stop <= max_:\n stops.add(stop)\n zeros_count += 1\n stop = fill_by_zeros(max_ + 1, zeros_count) - 1\n stops = list(stops)\n stops.sort()\n return stops\n"} +{"prompt": "ForCausalLM)\n - embed_out: (GPTNeoXForCausalLM)\n \"\"\"\n return model.get_output_embeddings()\n\n\ndef hf_get_hidden_size(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the hidden layer dimensionality of the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different hidden size attribute names.\n - hidden_size: (OPTConfig, BloomConfig)\n - n_embd: (GPT2Config, GPTJConfig)\n - d_model: (PegasusConfig, XLNetConfig)\n \"\"\"\n hidden_size_attrs = (\"hidden_size\", \"n_embd\", \"d_model\")\n return findattr(config, hidden_size_attrs)\n\n\ndef hf_get_num_hidden_layers(config: transformers.PretrainedConfig) -> int:\n \"\"\"Returns the number of hidden layers in the model architecture specified\n by the HuggingFace transformers config.\n NOTE: Different model configurations have different number-of-layers attribute\n names.\n - num_hidden_layers: (GPTNeoXConfig, OPTConfig)\n - n_layer: (GPT2Config, GPTJConfig, BloomConfig)\n \"\"\"\n num_hidden_layers_attrs = (\"num_hidden_layers\", \"n_layer\")\n return findattr(config, num_hidden_layers_attrs)\n\n\ndef get_global_statistics(xs: torch.Tensor) -> Tuple[float, float, int]:\n \"\"\"\n Computes element-wise mean and variance of the tensor across processes\n \"\"\"\n sum_and_count = torch.tensor([xs.sum(), xs.numel()], device=xs.device)\n dist.all_reduce(sum_and_count, dist.ReduceOp.SUM)\n global_sum, count = sum_and_count\n global_mean = global_sum / count\n\n sum_var = torch.sum((xs - global_mean) ** 2)\n dist.all_reduce(sum_var, dist.ReduceOp.SUM)\n global_var = sum_var / count\n return global_mean, global_var, count\n\n\ndef whiten(xs: torch.Tensor, shift_mean=True, distributed=True) -> torch.Tensor:\n \"\"\"Whitens values\"\"\"\n if distributed and dist.is_initialized():\n mean, var, _ = get_global_statistics(xs)\n else:\n var, mean = torch.var_mean(xs)\n\n whitened = (xs - mean) * torch.rsqrt(var + 1e-8)\n if not shift_mean:\n whitened += mean\n return whitened\n\n\ndef logprobs_of_labels(logits, labels):\n \"\"\"Log probabilities of the labels\n\n These are calculated 
from the logits.\"\"\"\n logprobs = F.log_softmax(logits, dim=-1)\n logprobs_labels = torch.gather(logprobs, dim=-1, index=labels.unsqueeze(-1))\n return logprobs_labels.squeeze(-1)\n\n\ndef flatten_dict(\n d: Union[dict, MutableMapping],\n parent_key: str = \"\",\n sep: str = \"/\",\n) -> dict:\n # From: https://stackoverflow.com/a/6027615\n items = []\n for k, v in d.items():\n new_key = parent_key + sep + k if parent_key else k\n if isinstance(v, MutableMapping):\n items.extend(flatten_dict(v, new_key, sep=sep).items())\n else:\n items.append((new_key, v))\n return dict(items)\n\n\ndef get_tensor_stats(xs: torch.Tensor, mask: torch.Tensor, n: int):\n mean = (xs * mask).sum() / n\n return dict(\n mean=mean,\n min=torch.where(mask.bool(), xs, np.inf).min(),\n max=torch.where(mask.bool(), xs, -np.inf).max(),\n std=torch.sqrt(((xs - mean) * mask).pow(2).sum() / n),\n )\n\n\nclass RunningMoments:\n def __init__(self):\n \"\"\"\n Calculates the running mean and standard deviation of a data stream. Modified version of\n https://github.com/DLR-RM/stable-baselines3/blob/a6f5049a99a4c21a6f0bcce458ca3306cef310e0/stable_baselines3/common/running_mean_std.py\n \"\"\"\n self.mean = 0\n self.std = 1\n self.var = 1\n self.count = 1e-24\n\n def update(self, xs: torch.Tensor) -> Tuple[float, float]:\n \"\"\"Updates running moments from batch's moments computed across ranks\"\"\"\n if dist.is_initialized():\n xs_mean, xs_var, xs_count = get_global_statistics(xs)\n else:\n xs_count = xs.numel()\n xs_var, xs_mean = torch.var_mean(xs, unbiased=False)\n\n delta = xs_mean - self.mean\n tot_count = self.count + xs_count\n\n new_sum = xs_var * xs_count\n # correct old_sum deviation accounting for the new mean\n old_sum = self.var * self.count + delta**2 * self.count * xs_count / tot_count\n tot_sum = old_sum + new_sum\n\n self.mean += delta * xs_count / tot_count\n self.var = tot_sum / tot_count\n self.std = (self.var * tot_count / (tot_count - 1)).sqrt()\n self.count = tot_count\n\n return xs_mean, (xs_var * xs_count / (xs_count - 1)).sqrt()\n\n\n# OpenDelta utilities\n\n\nMODIFIED_MODULES_DICT = {\n \"gptj\": {\n \"attention\": [\"attn.q_proj\", \"attn.k_proj\", \"attn.v_proj\"],\n \"mlp\": [\"mlp.fc_in\", \"mlp.fc_out\"],\n \"all\": [\n \"attn.q_proj\",\n \"attn.k_proj\",\n \"attn.v_proj\",\n \"attn.out_proj\",\n \"mlp.fc_in\",\n \"mlp.fc_out\",\n ],\n },\n \"gpt_neox\": {\n \"attention\": [\"attention.query_key_value\"],\n \"mlp\": [\"mlp.dense_h_to_4h\", \"mlp.dense_4h_to_h\"],\n \"all\": [\n \"attention.query_key_value\",\n \"attention.dense\",\n \"mlp.dense_h_to_4h\",\n \"mlp.dense_4h_to_h\",\n ],\n },\n \"opt\": {\n \"attention\": [\n \"self_attn.k_proj\",\n \"self_attn.v_proj\",\n \"self_attn.q_proj\",\n \"self_attn.out_proj\",\n ],\n \"mlp\": [\"fc1\", \"fc2\"],\n \"all\": [\n \"self_attn.k_proj\",\n \"self_attn.v_proj\",\n \"self_attn.q_proj\",\n \"self_attn.out_proj\",\n \"fc1\",\n \"fc2\",\n ],\n },\n \"bloom\": {\n \"attention\": [\"self_attention.query_key_value\", \"self_attention.dense\"],\n \"mlp\": [\"mlp.dense_h_to_4h\", \"mlp.dense_4h_to_h\"],\n \"all\": [\n \"self_attention.query_key_value\",\n \"self_attention.dense\",\n \"mlp.dense_h_to_4h\",\n \"mlp.dense_4h_to_h\",\n ],\n },\n \"t5\": {\n \"attention\": [\n \"layer.0.SelfAttention.q\",\n \"layer.0.SelfAttention.k\",\n \"layer.0.SelfAttention.v\",\n \"layer.0.SelfAttention.o\",\n \"layer.1.EncDecAttention.q\",\n \"layer.1.EncDecAttention.k\",\n \"layer.1.EncDecAttention.v\",\n \"layer.1.EncDecAttention.o\",\n ],\n \"mlp\": [\n 
\"layer.2.DenseReluDense.wo\",\n \"layer.2.DenseReluDense.wi_0\",\n \"layer.2.DenseReluDense.wi_1\",\n ],\n \"all\": [\n \"layer.0.SelfAttention.q\",\n \"layer.0.SelfAttention.k\",\n \"layer.0.SelfAttention.v\",\n \"layer.0.SelfAttention.o\",\n \"layer.1.EncDecAttention.q\",\n \"layer.1.EncDecAttention.k\",\n \"layer.1.EncDecAttention.v\",\n \"layer.1.EncDecAttention.o\",\n \"layer.2.DenseReluDense.wo\",\n \"layer.2.DenseReluDense.wi_0\",\n \"layer.2.DenseReluDense.wi_1\",\n ],\n },\n}\n\n\ndef generate_layer_regex(config: transformers.PretrainedConfig, num_layers_unfrozen: int = -1) -> str:\n \"\"\"Generates a regex range for the specified number of learnable layers.\"\"\"\n if num_layers_unfrozen == -1:\n return \"(\\d)+.\"\n num_hidden_layers = hf_get_num_hidden_layers(config)\n start_layer = num_hidden_layers - num_layers_unfrozen\n if start_layer < 0:\n raise Exception(\"Number of layers unfrozen cannot be greater than number of layers in the model\")\n pattern = f\"(?:{regex_for_range(start_layer, num_hidden_layers - 1)}).\"\n return f\"{pattern}\"\n\n\ndef get_delta_modified_modules(\n config: transformers.PretrainedConfig,\n modified_modules: List[str],\n num_layers_unfrozen: int = -1,\n) -> List[str]:\n \"\"\"Returns a list of module names to be modified for a given delta method with\n the specified number of learnable layers.\"\"\"\n unfrozen_layers_pattern = generate_layer_regex(config, num_layers_unfrozen)\n\n # [r] for regex as per https://github.com/thunlp/OpenDelta/blob/main/opendelta/utils/name_based_addressing.py#L20\n regex_prefix = \"[r]\"\n # TODO (jon-tow): `decoder.block.` is hardcoded to support T5 layer naming.\n decoder_prefix = \"decoder.block.\" if config.is_encoder_decoder else \"\"\n module_list = [regex_prefix + decoder_prefix + unfrozen_layers_pattern + module for module in modified_modules]\n return module_list\n\n\ndef get_delta_model_class(model_type: str):\n if not HAS_OPENDELTA:\n raise ValueError(\"OpenDelta package required to train with delta models. 
https://github.com/thunlp/OpenDelta.\")\n delta_models = {\n \"bitfit\": BitFitModel,\n \"adapter\": AdapterModel,\n \"prefix\": PrefixModel,\n \"lora\": LoraModel,\n \"softprompt\": SoftPromptModel,\n }\n return delta_models[model_type]\n\n\ndef parse_delta_kwargs(\n config: transformers.PretrainedConfig,\n delta_kwargs: Dict[str, Any],\n num_layers_unfrozen: int = -1,\n) -> Tuple[str, Dict[str, Any]]:\n \"\"\"Parses through delta kwargs to get delta type and proper modified modules.\"\"\"\n # This function is needed to parse through the `delta_kwargs` in order to:\n # 1) Get the `delta_type` method name to access the correct `delta_model_class`\n # 2a) Accept user specified `modified_modules` and if not provided use the `trlx` default mapping\n # 2b) Convert the list of `modified_modules` to a range of layers that fit within the range\n # of learnable layers as specified by `num_layers_unfrozen`\n\n # Pop `delta_type` to allow passing the kwargs to the model constructor since\n # `delta_type` is not a valid argument of the constructor\n delta_type = delta_kwargs.pop(\"delta_type\")\n assert delta_type in [\"lora\"], \"Only `LoRA` based delta models are supported\"\n\n # Use `trlx` default modified modules if none are specified\n modified_modules = delta_kwargs.get(\"modified_modules\", \"all\")\n if modified_modules in [\"all\", \"attention\", \"mlp\"]:\n if config.model_type not in MODIFIED_MODULES_DICT:\n raise ValueError(\n f\"Model type `{config.model_type}` is not currently supported for \"\n \"delta training with default modified modules.\"\n )\n modified_modules = MODIFIED_MODULES_DICT[config.model_type][modified_modules]\n # Update the `modified_modules` with the correct layer ranges\n delta_kwargs[\"modified_modules\"] = get_delta_modified_modules(\n config, modified_modules, num_layers_unfrozen=num_layers_unfrozen\n )\n\n return delta_type, delta_kwargs\n\n\ndef regex_for_range(min_: int, max_: int) -> str: # noqa\n \"\"\"Returns a regex that matches all numbers in the given range.\n\n Example: regex_for_range(12, 34) -> \"1[2-9]|2\\d|3[0-4]\"\n\n Copyright (c) 2013, Dmitry Voronin. 
All rights reserved.\n Reference: https://github.com/voronind/range-regex\n \"\"\"\n\n def split_to_patterns(min_, max_):\n subpatterns = []\n start = min_\n for stop in split_to_ranges(min_, max_):\n subpatterns.append(range_to_pattern(start, stop))\n start = stop + 1\n return subpatterns\n\n def split_to_ranges(min_, max_):\n stops = {max_}\n nines_count = 1\n stop = fill_by_nines(min_, nines_count)\n while min_ <= stop < max_:\n stops.add(stop)\n nines_count += 1\n stop = fill_by_nines(min_, nines_count)\n zeros_count = 1\n stop = fill_by_zeros(max_ + 1, zeros_count) - 1\n while min_ < stop <= max_:\n stops.add(stop)\n zeros_count += 1\n stop = fill_by_zeros(max_ + 1, zeros_count) - 1\n stops = list(stops)\n stops.sort()\n return stops\n\n def fill_by_nines(integer, nines_count):\n return int(str(integer)[:-nines_count] + \"9\" * nines_count)\n\n def fill_by_zeros(integer, zeros_count):\n return integer - integer % 10**zeros_count\n\n def range_to_pattern(start, stop):", "metadata": {"task_id": "CarperAI--trlx/26", "ground_truth": " pattern = \"\"\n any_digit_count = 0\n for start_digit, stop_digit in zip(str(start), str(stop)):\n if start_digit == stop_digit:\n pattern += start_digit\n elif start_digit != \"0\" or stop_digit != \"9\":\n pattern += \"[{}-{}]\".format(start_digit, stop_digit)\n else:\n any_digit_count += 1\n if any_digit_count:\n pattern += r\"\\d\"\n if any_digit_count > 1:\n pattern += \"{{{}}}\".format(any_digit_count)\n return pattern\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "utils", "modeling.py"], "context_start_lineno": 157, "lineno": 489, "function_name": "range_to_pattern"}, "groundtruth": " pattern = \"\"\n any_digit_count = 0\n for start_digit, stop_digit in zip(str(start), str(stop)):\n if start_digit == stop_digit:\n pattern += start_digit\n elif start_digit != \"0\" or stop_digit != \"9\":\n pattern += \"[{}-{}]\".format(start_digit, stop_digit)\n else:\n any_digit_count += 1\n if any_digit_count:\n pattern += r\"\\d\"\n if any_digit_count > 1:\n pattern += \"{{{}}}\".format(any_digit_count)\n return pattern\n"} +{"prompt": "import gc\nimport inspect\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport transformers\nfrom torchtyping import TensorType\nfrom transformers.modeling_outputs import ModelOutput\nfrom transformers.models.bloom import modeling_bloom\nfrom transformers.models.opt import modeling_opt\n\nfrom trlx.data.method_configs import MethodConfig, register_method\nfrom trlx.models.modeling_base import PreTrainedModelWrapper\nfrom trlx.utils.modeling import (\n flatten_dict,\n get_tensor_stats,\n hf_get_decoder,\n hf_get_decoder_blocks,\n hf_get_decoder_final_norm,\n hf_get_hidden_size,\n hf_get_lm_head,\n hf_get_num_hidden_layers,\n make_head,\n whiten,\n)\n\n# KL Controllers\n\n\nclass AdaptiveKLController:\n \"\"\"Adaptive KL Controller as described in Ziegler et al. 
\"Fine-Tuning Language Models from Human Preferences\"\n Reference: Section 2.2 https://arxiv.org/pdf/1909.08593.pdf#page=2\n Source: https://github.com/openai/lm-human-preferences/blob/master/lm_human_preferences/train_policy.py\n \"\"\"\n\n def __init__(self, init_kl_coef: float, target: float, horizon: int):\n self.value = init_kl_coef\n self.target = target\n self.horizon = horizon\n\n def update(self, current: float, n_steps: int):\n \"\"\"Returns adaptively updated KL coefficient, \u03b2\u209c\u208a\u2081.\n Arguments:\n current: The current KL value between the newest policy and the initial policy.\n \"\"\"\n proportional_error = np.clip(current / self.target - 1, -0.2, 0.2) # \u03f5\u209c\n mult = 1 + proportional_error * n_steps / self.horizon\n self.value *= mult # \u03b2\u209c\u208a\u2081\n\n\nclass FixedKLController:\n \"\"\"Fixed KL controller.\"\"\"\n\n def __init__(self, kl_coef):\n self.value = kl_coef\n\n def update(self, current: float, n_steps: int):\n \"\"\"Returns updated KL coefficient, \u03b2\u209c\u208a\u2081.\n Arguments:\n current: The current KL value between the newest policy and the initial policy.\n \"\"\"\n pass\n\n\n# PPO Configs\n\n\n@dataclass\n@register_method\nclass PPOConfig(MethodConfig):\n \"\"\"\n Config for PPO method\n\n :param ppo_epochs: Number of updates per batch\n :type ppo_epochs: int\n\n :param num_rollouts: Number of experiences to observe before learning\n :type num_rollouts: int\n\n :param init_kl_coef: Initial value for KL coefficient\n :type init_kl_coef: float\n\n :param target: Target value for KL coefficient\n :type target: float\n\n :param horizon: Number of steps for KL coefficient to reach target\n :type horizon: int\n\n :param gamma: Discount factor\n :type gamma: float\n\n :param lam: GAE lambda\n :type lam: float\n\n :param cliprange: Clipping range for PPO policy loss (1 - cliprange, 1 + cliprange)\n :type cliprange: float\n\n :param cliprange_value: Clipping range for predicted values\n (observed values - cliprange_value, observed values + cliprange_value)\n :type cliprange_value: float\n\n :param vf_coef: Value loss scale w.r.t policy loss\n :type vf_coef: float\n\n :param gen_kwargs: Additioanl kwargs for the generation\n :type gen_kwargs: Dict[str, Any]\n\n :param gen_experience_kwargs: if this is not None, then the experience is generated using this\n :type gen_experience_kwargs: Dict[str, Any]\n \"\"\"\n\n ppo_epochs: int\n num_rollouts: int\n chunk_size: int\n init_kl_coef: float\n target: float\n horizon: int\n gamma: float\n lam: float\n cliprange: float\n cliprange_value: float\n vf_coef: float\n scale_reward: str\n ref_mean: Optional[float]\n ref_std: Optional[float]\n cliprange_reward: float\n gen_kwargs: dict\n gen_experience_kwargs: Optional[dict] = None\n\n def get_advantages_and_returns(\n self,\n values: TensorType[\"batch_size\", \"response_size\"],\n rewards: TensorType[\"batch_size\", \"response_size\"],\n response_length: int,\n use_whitening: Optional[bool] = True,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Function that computes advantages and returns from rewards and values.\n Calculated as in the original PPO paper: https://arxiv.org/abs/1707.06347\n Note that rewards may include a KL divergence loss term.\n\n Advantages looks like this:\n Adv1 = R1 + \u03b3 * \u03bb * R2 + \u03b3^2 * \u03bb^2 * R3 + ...\n - V1 + \u03b3 * (1 - \u03bb) V2 + \u03b3^2 * \u03bb * (1 - \u03bb) V3 + ...\n\n Returns looks like this:\n Ret1 = R1 + \u03b3 * \u03bb * R2 + \u03b3^2 * \u03bb^2 * R3 + ...\n + \u03b3 * (1 - 
\u03bb) V2 + \u03b3^2 * \u03bb * (1 - \u03bb) V3 + ...\n\n Args:\n values: Tensor of shape (batch_size, response_size)\n rewards: Tensor of shape (batch_size, response_size)\n response_length: Length of the response sequence\n use_whitening: Whether to use whitening (ie. normalize advantages) or not\n \"\"\"\n lastgaelam = 0\n advantages_reversed = []\n for t in reversed(range(response_length)):\n nextvalues = values[:, t + 1] if t < response_length - 1 else 0.0\n delta = rewards[:, t] + self.gamma * nextvalues - values[:, t]\n lastgaelam = delta + self.gamma * self.lam * lastgaelam\n advantages_reversed.append(lastgaelam)\n advantages = torch.stack(advantages_reversed[::-1], dim=1)\n returns = advantages + values\n if use_whitening:\n advantages = whiten(advantages)\n return advantages.detach(), returns\n\n def loss(\n self,\n logprobs: TensorType[\"batch_size\", \"response_size\"],\n values: TensorType[\"batch_size\", \"response_size\"],\n old_logprobs: TensorType[\"batch_size\", \"response_size\"],\n old_values: TensorType[\"batch_size\", \"response_size\"],\n advantages: TensorType[\"batch_size\", \"response_size\"],\n returns: TensorType[\"batch_size\", \"response_size\"],\n mask: TensorType[\"batch_size\", \"response_size\"],\n ):\n \"\"\"PPO objective function.\n References:\n - https://stable-baselines.readthedocs.io/en/master/modules/ppo2.html\n \"\"\"\n values_clipped = torch.clamp(\n values,\n old_values - self.cliprange_value,\n old_values + self.cliprange_value,\n )\n n = mask.sum()\n\n vf_loss1 = (values - returns) ** 2\n vf_loss2 = (values_clipped - returns) ** 2\n vf_loss = 0.5 * torch.sum(torch.max(vf_loss1, vf_loss2) * mask) / n\n vf_clipfrac = torch.sum((vf_loss2 > vf_loss1).float() * mask) / n\n\n log_ratio = (logprobs - old_logprobs) * mask\n ratio = torch.exp(log_ratio)\n # Unbiased KL-div estimates (`k3`). 
Ref: http://joschu.net/blog/kl-approx.html\n with torch.no_grad():\n approx_kl = torch.mean((ratio - 1) - log_ratio)\n\n pg_loss1 = -advantages * ratio\n pg_loss2 = -advantages * torch.clamp(\n ratio,\n 1.0 - self.cliprange,\n 1.0 + self.cliprange,\n )\n pg_loss = torch.sum(torch.max(pg_loss1, pg_loss2) * mask) / n\n pg_clipfrac = torch.sum((pg_loss2 > pg_loss1).float() * mask) / n\n\n loss = pg_loss + self.vf_coef * vf_loss\n\n stats = dict(\n losses=dict(\n total_loss=loss.item(),\n policy_loss=pg_loss.item(),\n value_loss=vf_loss.item(),\n ),\n values=dict(\n get_tensor_stats(values, mask, n),\n values_error=torch.sum(((values - returns) * mask) ** 2) / n,\n clipfrac=vf_clipfrac,\n ),\n old_values=get_tensor_stats(old_values, mask, n),\n returns=get_tensor_stats(returns, mask, n),\n policy=dict(approx_kl=approx_kl.item(), clipfrac=pg_clipfrac.item()),\n ratio=(ratio * mask).sum() / n,\n padding_percentage=n / mask.numel(),\n )\n\n return loss, flatten_dict(stats)\n\n\n# CausalLM architectures\n\n\n@dataclass\nclass CausalLMOutputWithValue(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n value: Optional[torch.FloatTensor] = None\n\n\nclass AutoModelForCausalLMWithValueHead(PreTrainedModelWrapper):\n \"\"\"An `AutoModel` class wrapper for `transformers` causal models that have a\n language modeling head and a value head\n \"\"\"\n\n _auto_model_parent_class = transformers.AutoModelForCausalLM\n _supported_modules = [\"v_head\"]\n _supported_args = []\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n ):\n super().__init__(base_model)\n self.v_head = make_head(hf_get_hidden_size(self.base_model.config), 1)\n\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n position_ids: Optional[List[torch.FloatTensor]] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithValue]:", "metadata": {"task_id": "CarperAI--trlx/27", "ground_truth": " forward_kwargs = self.get_compatible_forward_kwargs(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n forward_kwargs[\"output_hidden_states\"] = True\n forward_kwargs[\"return_dict\"] = True\n\n outputs = self.base_model(**forward_kwargs)\n value = self.v_head(outputs.hidden_states[-1]).squeeze(-1)\n\n if not return_dict:\n outputs = (outputs.logits,) + outputs[1:] + (value,)\n return outputs\n\n return CausalLMOutputWithValue(**outputs, value=value)\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_ppo.py"], "context_start_lineno": 0, "lineno": 278, "function_name": "forward"}, "groundtruth": " forward_kwargs = self.get_compatible_forward_kwargs(\n input_ids=input_ids,\n attention_mask=attention_mask,\n 
position_ids=position_ids,\n past_key_values=past_key_values,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n forward_kwargs[\"output_hidden_states\"] = True\n forward_kwargs[\"return_dict\"] = True\n\n outputs = self.base_model(**forward_kwargs)\n value = self.v_head(outputs.hidden_states[-1]).squeeze(-1)\n\n if not return_dict:\n outputs = (outputs.logits,) + outputs[1:] + (value,)\n return outputs\n\n return CausalLMOutputWithValue(**outputs, value=value)\n"} +{"prompt": "import gc\nimport inspect\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport transformers\nfrom torchtyping import TensorType\nfrom transformers.modeling_outputs import ModelOutput\nfrom transformers.models.bloom import modeling_bloom\nfrom transformers.models.opt import modeling_opt\n\nfrom trlx.data.method_configs import MethodConfig, register_method\nfrom trlx.models.modeling_base import PreTrainedModelWrapper\nfrom trlx.utils.modeling import (\n flatten_dict,\n get_tensor_stats,\n hf_get_decoder,\n hf_get_decoder_blocks,\n hf_get_decoder_final_norm,\n hf_get_hidden_size,\n hf_get_lm_head,\n hf_get_num_hidden_layers,\n make_head,\n whiten,\n)\n\n# KL Controllers\n\n\nclass AdaptiveKLController:\n \"\"\"Adaptive KL Controller as described in Ziegler et al. \"Fine-Tuning Language Models from Human Preferences\"\n Reference: Section 2.2 https://arxiv.org/pdf/1909.08593.pdf#page=2\n Source: https://github.com/openai/lm-human-preferences/blob/master/lm_human_preferences/train_policy.py\n \"\"\"\n\n def __init__(self, init_kl_coef: float, target: float, horizon: int):\n self.value = init_kl_coef\n self.target = target\n self.horizon = horizon\n\n def update(self, current: float, n_steps: int):\n \"\"\"Returns adaptively updated KL coefficient, \u03b2\u209c\u208a\u2081.\n Arguments:\n current: The current KL value between the newest policy and the initial policy.\n \"\"\"\n proportional_error = np.clip(current / self.target - 1, -0.2, 0.2) # \u03f5\u209c\n mult = 1 + proportional_error * n_steps / self.horizon\n self.value *= mult # \u03b2\u209c\u208a\u2081\n\n\nclass FixedKLController:\n \"\"\"Fixed KL controller.\"\"\"\n\n def __init__(self, kl_coef):\n self.value = kl_coef\n\n def update(self, current: float, n_steps: int):\n \"\"\"Returns updated KL coefficient, \u03b2\u209c\u208a\u2081.\n Arguments:\n current: The current KL value between the newest policy and the initial policy.\n \"\"\"\n pass\n\n\n# PPO Configs\n\n\n@dataclass\n@register_method\nclass PPOConfig(MethodConfig):\n \"\"\"\n Config for PPO method\n\n :param ppo_epochs: Number of updates per batch\n :type ppo_epochs: int\n\n :param num_rollouts: Number of experiences to observe before learning\n :type num_rollouts: int\n\n :param init_kl_coef: Initial value for KL coefficient\n :type init_kl_coef: float\n\n :param target: Target value for KL coefficient\n :type target: float\n\n :param horizon: Number of steps for KL coefficient to reach target\n :type horizon: int\n\n :param gamma: Discount factor\n :type gamma: float\n\n :param lam: GAE lambda\n :type lam: float\n\n :param cliprange: Clipping range for PPO policy loss (1 - cliprange, 1 + cliprange)\n :type cliprange: float\n\n :param cliprange_value: Clipping range for predicted values\n (observed values - cliprange_value, observed 
values + cliprange_value)\n :type cliprange_value: float\n\n :param vf_coef: Value loss scale w.r.t policy loss\n :type vf_coef: float\n\n :param gen_kwargs: Additioanl kwargs for the generation\n :type gen_kwargs: Dict[str, Any]\n\n :param gen_experience_kwargs: if this is not None, then the experience is generated using this\n :type gen_experience_kwargs: Dict[str, Any]\n \"\"\"\n\n ppo_epochs: int\n num_rollouts: int\n chunk_size: int\n init_kl_coef: float\n target: float\n horizon: int\n gamma: float\n lam: float\n cliprange: float\n cliprange_value: float\n vf_coef: float\n scale_reward: str\n ref_mean: Optional[float]\n ref_std: Optional[float]\n cliprange_reward: float\n gen_kwargs: dict\n gen_experience_kwargs: Optional[dict] = None\n\n def get_advantages_and_returns(\n self,\n values: TensorType[\"batch_size\", \"response_size\"],\n rewards: TensorType[\"batch_size\", \"response_size\"],\n response_length: int,\n use_whitening: Optional[bool] = True,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Function that computes advantages and returns from rewards and values.\n Calculated as in the original PPO paper: https://arxiv.org/abs/1707.06347\n Note that rewards may include a KL divergence loss term.\n\n Advantages looks like this:\n Adv1 = R1 + \u03b3 * \u03bb * R2 + \u03b3^2 * \u03bb^2 * R3 + ...\n - V1 + \u03b3 * (1 - \u03bb) V2 + \u03b3^2 * \u03bb * (1 - \u03bb) V3 + ...\n\n Returns looks like this:\n Ret1 = R1 + \u03b3 * \u03bb * R2 + \u03b3^2 * \u03bb^2 * R3 + ...\n + \u03b3 * (1 - \u03bb) V2 + \u03b3^2 * \u03bb * (1 - \u03bb) V3 + ...\n\n Args:\n values: Tensor of shape (batch_size, response_size)\n rewards: Tensor of shape (batch_size, response_size)\n response_length: Length of the response sequence\n use_whitening: Whether to use whitening (ie. normalize advantages) or not\n \"\"\"\n lastgaelam = 0\n advantages_reversed = []\n for t in reversed(range(response_length)):\n nextvalues = values[:, t + 1] if t < response_length - 1 else 0.0\n delta = rewards[:, t] + self.gamma * nextvalues - values[:, t]\n lastgaelam = delta + self.gamma * self.lam * lastgaelam\n advantages_reversed.append(lastgaelam)\n advantages = torch.stack(advantages_reversed[::-1], dim=1)\n returns = advantages + values\n if use_whitening:\n advantages = whiten(advantages)\n return advantages.detach(), returns\n\n def loss(\n self,\n logprobs: TensorType[\"batch_size\", \"response_size\"],\n values: TensorType[\"batch_size\", \"response_size\"],\n old_logprobs: TensorType[\"batch_size\", \"response_size\"],\n old_values: TensorType[\"batch_size\", \"response_size\"],\n advantages: TensorType[\"batch_size\", \"response_size\"],\n returns: TensorType[\"batch_size\", \"response_size\"],\n mask: TensorType[\"batch_size\", \"response_size\"],\n ):\n \"\"\"PPO objective function.\n References:\n - https://stable-baselines.readthedocs.io/en/master/modules/ppo2.html\n \"\"\"\n values_clipped = torch.clamp(\n values,\n old_values - self.cliprange_value,\n old_values + self.cliprange_value,\n )\n n = mask.sum()\n\n vf_loss1 = (values - returns) ** 2\n vf_loss2 = (values_clipped - returns) ** 2\n vf_loss = 0.5 * torch.sum(torch.max(vf_loss1, vf_loss2) * mask) / n\n vf_clipfrac = torch.sum((vf_loss2 > vf_loss1).float() * mask) / n\n\n log_ratio = (logprobs - old_logprobs) * mask\n ratio = torch.exp(log_ratio)\n # Unbiased KL-div estimates (`k3`). 
Ref: http://joschu.net/blog/kl-approx.html\n with torch.no_grad():\n approx_kl = torch.mean((ratio - 1) - log_ratio)\n\n pg_loss1 = -advantages * ratio\n pg_loss2 = -advantages * torch.clamp(\n ratio,\n 1.0 - self.cliprange,\n 1.0 + self.cliprange,\n )\n pg_loss = torch.sum(torch.max(pg_loss1, pg_loss2) * mask) / n\n pg_clipfrac = torch.sum((pg_loss2 > pg_loss1).float() * mask) / n\n\n loss = pg_loss + self.vf_coef * vf_loss\n\n stats = dict(\n losses=dict(\n total_loss=loss.item(),\n policy_loss=pg_loss.item(),\n value_loss=vf_loss.item(),\n ),\n values=dict(\n get_tensor_stats(values, mask, n),\n values_error=torch.sum(((values - returns) * mask) ** 2) / n,\n clipfrac=vf_clipfrac,\n ),\n old_values=get_tensor_stats(old_values, mask, n),\n returns=get_tensor_stats(returns, mask, n),\n policy=dict(approx_kl=approx_kl.item(), clipfrac=pg_clipfrac.item()),\n ratio=(ratio * mask).sum() / n,\n padding_percentage=n / mask.numel(),\n )\n\n return loss, flatten_dict(stats)\n\n\n# CausalLM architectures\n\n\n@dataclass\nclass CausalLMOutputWithValue(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n value: Optional[torch.FloatTensor] = None\n\n\nclass AutoModelForCausalLMWithValueHead(PreTrainedModelWrapper):\n \"\"\"An `AutoModel` class wrapper for `transformers` causal models that have a\n language modeling head and a value head\n \"\"\"\n\n _auto_model_parent_class = transformers.AutoModelForCausalLM\n _supported_modules = [\"v_head\"]\n _supported_args = []\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n ):\n super().__init__(base_model)\n self.v_head = make_head(hf_get_hidden_size(self.base_model.config), 1)\n\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n position_ids: Optional[List[torch.FloatTensor]] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithValue]:\n forward_kwargs = self.get_compatible_forward_kwargs(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n forward_kwargs[\"output_hidden_states\"] = True\n forward_kwargs[\"return_dict\"] = True\n\n outputs = self.base_model(**forward_kwargs)\n value = self.v_head(outputs.hidden_states[-1]).squeeze(-1)\n\n if not return_dict:\n outputs = (outputs.logits,) + outputs[1:] + (value,)\n return outputs\n\n return CausalLMOutputWithValue(**outputs, value=value)\n\n def generate(self, *args, **kwargs) -> Union[ModelOutput, torch.LongTensor]:\n return self.base_model.generate(*args, **kwargs)\n\n def state_dict(self, *args, **kwargs):\n \"\"\"\n Returns the state dictionary of the model. 
We add the state dictionary of the value head\n to the state dictionary of the wrapped model by prepending the key with `v_head.`.\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/28", "ground_truth": " base_model_state_dict = self.base_model.state_dict(*args, **kwargs)\n v_head_state_dict = self.v_head.state_dict(*args, **kwargs)\n for k, v in v_head_state_dict.items():\n base_model_state_dict[f\"v_head.{k}\"] = v\n return base_model_state_dict\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_ppo.py"], "context_start_lineno": 0, "lineno": 310, "function_name": "state_dict"}, "groundtruth": " base_model_state_dict = self.base_model.state_dict(*args, **kwargs)\n v_head_state_dict = self.v_head.state_dict(*args, **kwargs)\n for k, v in v_head_state_dict.items():\n base_model_state_dict[f\"v_head.{k}\"] = v\n return base_model_state_dict\n"} +{"prompt": "import gc\nimport inspect\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport transformers\nfrom torchtyping import TensorType\nfrom transformers.modeling_outputs import ModelOutput\nfrom transformers.models.bloom import modeling_bloom\nfrom transformers.models.opt import modeling_opt\n\nfrom trlx.data.method_configs import MethodConfig, register_method\nfrom trlx.models.modeling_base import PreTrainedModelWrapper\nfrom trlx.utils.modeling import (\n flatten_dict,\n get_tensor_stats,\n hf_get_decoder,\n hf_get_decoder_blocks,\n hf_get_decoder_final_norm,\n hf_get_hidden_size,\n hf_get_lm_head,\n hf_get_num_hidden_layers,\n make_head,\n whiten,\n)\n\n# KL Controllers\n\n\nclass AdaptiveKLController:\n \"\"\"Adaptive KL Controller as described in Ziegler et al. \"Fine-Tuning Language Models from Human Preferences\"\n Reference: Section 2.2 https://arxiv.org/pdf/1909.08593.pdf#page=2\n Source: https://github.com/openai/lm-human-preferences/blob/master/lm_human_preferences/train_policy.py\n \"\"\"\n\n def __init__(self, init_kl_coef: float, target: float, horizon: int):\n self.value = init_kl_coef\n self.target = target\n self.horizon = horizon\n\n def update(self, current: float, n_steps: int):\n \"\"\"Returns adaptively updated KL coefficient, \u03b2\u209c\u208a\u2081.\n Arguments:\n current: The current KL value between the newest policy and the initial policy.\n \"\"\"\n proportional_error = np.clip(current / self.target - 1, -0.2, 0.2) # \u03f5\u209c\n mult = 1 + proportional_error * n_steps / self.horizon\n self.value *= mult # \u03b2\u209c\u208a\u2081\n\n\nclass FixedKLController:\n \"\"\"Fixed KL controller.\"\"\"\n\n def __init__(self, kl_coef):\n self.value = kl_coef\n\n def update(self, current: float, n_steps: int):\n \"\"\"Returns updated KL coefficient, \u03b2\u209c\u208a\u2081.\n Arguments:\n current: The current KL value between the newest policy and the initial policy.\n \"\"\"\n pass\n\n\n# PPO Configs\n\n\n@dataclass\n@register_method\nclass PPOConfig(MethodConfig):\n \"\"\"\n Config for PPO method\n\n :param ppo_epochs: Number of updates per batch\n :type ppo_epochs: int\n\n :param num_rollouts: Number of experiences to observe before learning\n :type num_rollouts: int\n\n :param init_kl_coef: Initial value for KL coefficient\n :type init_kl_coef: float\n\n :param target: Target value for KL coefficient\n :type target: float\n\n :param horizon: Number of steps for KL coefficient to reach target\n :type horizon: int\n\n :param gamma: Discount factor\n :type gamma: float\n\n :param lam: 
GAE lambda\n :type lam: float\n\n :param cliprange: Clipping range for PPO policy loss (1 - cliprange, 1 + cliprange)\n :type cliprange: float\n\n :param cliprange_value: Clipping range for predicted values\n (observed values - cliprange_value, observed values + cliprange_value)\n :type cliprange_value: float\n\n :param vf_coef: Value loss scale w.r.t policy loss\n :type vf_coef: float\n\n :param gen_kwargs: Additioanl kwargs for the generation\n :type gen_kwargs: Dict[str, Any]\n\n :param gen_experience_kwargs: if this is not None, then the experience is generated using this\n :type gen_experience_kwargs: Dict[str, Any]\n \"\"\"\n\n ppo_epochs: int\n num_rollouts: int\n chunk_size: int\n init_kl_coef: float\n target: float\n horizon: int\n gamma: float\n lam: float\n cliprange: float\n cliprange_value: float\n vf_coef: float\n scale_reward: str\n ref_mean: Optional[float]\n ref_std: Optional[float]\n cliprange_reward: float\n gen_kwargs: dict\n gen_experience_kwargs: Optional[dict] = None\n\n def get_advantages_and_returns(\n self,\n values: TensorType[\"batch_size\", \"response_size\"],\n rewards: TensorType[\"batch_size\", \"response_size\"],\n response_length: int,\n use_whitening: Optional[bool] = True,\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Function that computes advantages and returns from rewards and values.\n Calculated as in the original PPO paper: https://arxiv.org/abs/1707.06347\n Note that rewards may include a KL divergence loss term.\n\n Advantages looks like this:\n Adv1 = R1 + \u03b3 * \u03bb * R2 + \u03b3^2 * \u03bb^2 * R3 + ...\n - V1 + \u03b3 * (1 - \u03bb) V2 + \u03b3^2 * \u03bb * (1 - \u03bb) V3 + ...\n\n Returns looks like this:\n Ret1 = R1 + \u03b3 * \u03bb * R2 + \u03b3^2 * \u03bb^2 * R3 + ...\n + \u03b3 * (1 - \u03bb) V2 + \u03b3^2 * \u03bb * (1 - \u03bb) V3 + ...\n\n Args:\n values: Tensor of shape (batch_size, response_size)\n rewards: Tensor of shape (batch_size, response_size)\n response_length: Length of the response sequence\n use_whitening: Whether to use whitening (ie. 
normalize advantages) or not\n \"\"\"\n lastgaelam = 0\n advantages_reversed = []\n for t in reversed(range(response_length)):\n nextvalues = values[:, t + 1] if t < response_length - 1 else 0.0\n delta = rewards[:, t] + self.gamma * nextvalues - values[:, t]\n lastgaelam = delta + self.gamma * self.lam * lastgaelam\n advantages_reversed.append(lastgaelam)\n advantages = torch.stack(advantages_reversed[::-1], dim=1)\n returns = advantages + values\n if use_whitening:\n advantages = whiten(advantages)\n return advantages.detach(), returns\n\n def loss(\n self,\n logprobs: TensorType[\"batch_size\", \"response_size\"],\n values: TensorType[\"batch_size\", \"response_size\"],\n old_logprobs: TensorType[\"batch_size\", \"response_size\"],\n old_values: TensorType[\"batch_size\", \"response_size\"],\n advantages: TensorType[\"batch_size\", \"response_size\"],\n returns: TensorType[\"batch_size\", \"response_size\"],\n mask: TensorType[\"batch_size\", \"response_size\"],\n ):\n \"\"\"PPO objective function.\n References:\n - https://stable-baselines.readthedocs.io/en/master/modules/ppo2.html\n \"\"\"\n values_clipped = torch.clamp(\n values,\n old_values - self.cliprange_value,\n old_values + self.cliprange_value,\n )\n n = mask.sum()\n\n vf_loss1 = (values - returns) ** 2\n vf_loss2 = (values_clipped - returns) ** 2\n vf_loss = 0.5 * torch.sum(torch.max(vf_loss1, vf_loss2) * mask) / n\n vf_clipfrac = torch.sum((vf_loss2 > vf_loss1).float() * mask) / n\n\n log_ratio = (logprobs - old_logprobs) * mask\n ratio = torch.exp(log_ratio)\n # Unbiased KL-div estimates (`k3`). Ref: http://joschu.net/blog/kl-approx.html\n with torch.no_grad():\n approx_kl = torch.mean((ratio - 1) - log_ratio)\n\n pg_loss1 = -advantages * ratio\n pg_loss2 = -advantages * torch.clamp(\n ratio,\n 1.0 - self.cliprange,\n 1.0 + self.cliprange,\n )\n pg_loss = torch.sum(torch.max(pg_loss1, pg_loss2) * mask) / n\n pg_clipfrac = torch.sum((pg_loss2 > pg_loss1).float() * mask) / n\n\n loss = pg_loss + self.vf_coef * vf_loss\n\n stats = dict(\n losses=dict(\n total_loss=loss.item(),\n policy_loss=pg_loss.item(),\n value_loss=vf_loss.item(),\n ),\n values=dict(\n get_tensor_stats(values, mask, n),\n values_error=torch.sum(((values - returns) * mask) ** 2) / n,\n clipfrac=vf_clipfrac,\n ),\n old_values=get_tensor_stats(old_values, mask, n),\n returns=get_tensor_stats(returns, mask, n),\n policy=dict(approx_kl=approx_kl.item(), clipfrac=pg_clipfrac.item()),\n ratio=(ratio * mask).sum() / n,\n padding_percentage=n / mask.numel(),\n )\n\n return loss, flatten_dict(stats)\n\n\n# CausalLM architectures\n\n\n@dataclass\nclass CausalLMOutputWithValue(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n value: Optional[torch.FloatTensor] = None\n\n\nclass AutoModelForCausalLMWithValueHead(PreTrainedModelWrapper):\n \"\"\"An `AutoModel` class wrapper for `transformers` causal models that have a\n language modeling head and a value head\n \"\"\"\n\n _auto_model_parent_class = transformers.AutoModelForCausalLM\n _supported_modules = [\"v_head\"]\n _supported_args = []\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n ):\n super().__init__(base_model)\n self.v_head = make_head(hf_get_hidden_size(self.base_model.config), 1)\n\n def 
forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n position_ids: Optional[List[torch.FloatTensor]] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithValue]:\n forward_kwargs = self.get_compatible_forward_kwargs(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n forward_kwargs[\"output_hidden_states\"] = True\n forward_kwargs[\"return_dict\"] = True\n\n outputs = self.base_model(**forward_kwargs)\n value = self.v_head(outputs.hidden_states[-1]).squeeze(-1)\n\n if not return_dict:\n outputs = (outputs.logits,) + outputs[1:] + (value,)\n return outputs\n\n return CausalLMOutputWithValue(**outputs, value=value)\n\n def generate(self, *args, **kwargs) -> Union[ModelOutput, torch.LongTensor]:\n return self.base_model.generate(*args, **kwargs)\n\n def state_dict(self, *args, **kwargs):\n \"\"\"\n Returns the state dictionary of the model. We add the state dictionary of the value head\n to the state dictionary of the wrapped model by prepending the key with `v_head.`.\n \"\"\"\n base_model_state_dict = self.base_model.state_dict(*args, **kwargs)\n v_head_state_dict = self.v_head.state_dict(*args, **kwargs)\n for k, v in v_head_state_dict.items():\n base_model_state_dict[f\"v_head.{k}\"] = v\n return base_model_state_dict\n\n def post_init(self, state_dict):\n \"\"\"\n Adds the state dictionary of the value head to the state dictionary of the wrapped model\n by prepending the key with `v_head.`. 
This function removes the `v_head.` prefix from the\n keys of the value head state dictionary.\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/29", "ground_truth": " for k in list(state_dict.keys()):\n if \"v_head.\" in k:\n state_dict[k.replace(\"v_head.\", \"\")] = state_dict.pop(k)\n self.v_head.load_state_dict(state_dict, strict=False)\n del state_dict\n gc.collect() # noqa: E702\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_ppo.py"], "context_start_lineno": 0, "lineno": 322, "function_name": "post_init"}, "groundtruth": " for k in list(state_dict.keys()):\n if \"v_head.\" in k:\n state_dict[k.replace(\"v_head.\", \"\")] = state_dict.pop(k)\n self.v_head.load_state_dict(state_dict, strict=False)\n del state_dict\n gc.collect() # noqa: E702\n"} +{"prompt": "import gc\nimport inspect\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom typing import List, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport transformers\nfrom torchtyping import TensorType\nfrom transformers.modeling_outputs import ModelOutput\nfrom transformers.models.bloom import modeling_bloom\nfrom transformers.models.opt import modeling_opt\n\nfrom trlx.data.method_configs import MethodConfig, register_method\nfrom trlx.models.modeling_base import PreTrainedModelWrapper\nfrom trlx.utils.modeling import (\n flatten_dict,\n get_tensor_stats,\n hf_get_decoder,\n hf_get_decoder_blocks,\n hf_get_decoder_final_norm,\n hf_get_hidden_size,\n hf_get_lm_head,\n hf_get_num_hidden_layers,\n make_head,\n whiten,\n)\n\n# KL Controllers\n\n\nclass AdaptiveKLController:\n \"\"\"Adaptive KL Controller as described in Ziegler et al. \"Fine-Tuning Language Models from Human Preferences\"\n Reference: Section 2.2 https://arxiv.org/pdf/1909.08593.pdf#page=2\n Source: https://github.com/openai/lm-human-preferences/blob/master/lm_human_preferences/train_policy.py\n \"\"\"\n\n def __init__(self, init_kl_coef: float, target: float, horizon: int):\n self.value = init_kl_coef\n self.target = target\n self.horizon = horizon\n\n def update(self, current: float, n_steps: int):\n \"\"\"Returns adaptively updated KL coefficient, \u03b2\u209c\u208a\u2081.\n Arguments:\n current: The current KL value between the newest policy and the initial policy.\n \"\"\"\n proportional_error = np.clip(current / self.target - 1, -0.2, 0.2) # \u03f5\u209c\n mult = 1 + proportional_error * n_steps / self.horizon\n self.value *= mult # \u03b2\u209c\u208a\u2081\n\n\nclass FixedKLController:\n \"\"\"Fixed KL controller.\"\"\"\n\n def __init__(self, kl_coef):\n self.value = kl_coef\n\n def update(self, current: float, n_steps: int):\n \"\"\"Returns updated KL coefficient, \u03b2\u209c\u208a\u2081.\n Arguments:\n current: The current KL value between the newest policy and the initial policy.\n \"\"\"\n pass\n\n\n# PPO Configs\n\n\n@dataclass\n@register_method\nclass PPOConfig(MethodConfig):\n \"\"\"\n Config for PPO method\n\n :param ppo_epochs: Number of updates per batch\n :type ppo_epochs: int\n\n :param num_rollouts: Number of experiences to observe before learning\n :type num_rollouts: int\n\n :param init_kl_coef: Initial value for KL coefficient\n :type init_kl_coef: float\n\n :param target: Target value for KL coefficient\n :type target: float\n\n :param horizon: Number of steps for KL coefficient to reach target\n :type horizon: int\n\n :param gamma: Discount factor\n :type gamma: float\n\n :param lam: GAE lambda\n :type lam: float\n\n :param cliprange: Clipping range for PPO policy loss (1 
- cliprange, 1 + cliprange)\n    :type cliprange: float\n\n    :param cliprange_value: Clipping range for predicted values\n        (observed values - cliprange_value, observed values + cliprange_value)\n    :type cliprange_value: float\n\n    :param vf_coef: Value loss scale w.r.t policy loss\n    :type vf_coef: float\n\n    :param gen_kwargs: Additional kwargs for the generation\n    :type gen_kwargs: Dict[str, Any]\n\n    :param gen_experience_kwargs: if this is not None, then the experience is generated using this\n    :type gen_experience_kwargs: Dict[str, Any]\n    \"\"\"\n\n    ppo_epochs: int\n    num_rollouts: int\n    chunk_size: int\n    init_kl_coef: float\n    target: float\n    horizon: int\n    gamma: float\n    lam: float\n    cliprange: float\n    cliprange_value: float\n    vf_coef: float\n    scale_reward: str\n    ref_mean: Optional[float]\n    ref_std: Optional[float]\n    cliprange_reward: float\n    gen_kwargs: dict\n    gen_experience_kwargs: Optional[dict] = None\n\n    def get_advantages_and_returns(\n        self,\n        values: TensorType[\"batch_size\", \"response_size\"],\n        rewards: TensorType[\"batch_size\", \"response_size\"],\n        response_length: int,\n        use_whitening: Optional[bool] = True,\n    ) -> Tuple[torch.Tensor, torch.Tensor]:\n        \"\"\"Function that computes advantages and returns from rewards and values.\n        Calculated as in the original PPO paper: https://arxiv.org/abs/1707.06347\n        Note that rewards may include a KL divergence loss term.\n\n        Advantages looks like this:\n        Adv1 =  R1 + \u03b3 * \u03bb * R2     + \u03b3^2 * \u03bb^2 * R3       + ...\n              - V1 + \u03b3 * (1 - \u03bb) V2 + \u03b3^2 * \u03bb * (1 - \u03bb) V3 + ...\n\n        Returns looks like this:\n        Ret1 =  R1 + \u03b3 * \u03bb * R2     + \u03b3^2 * \u03bb^2 * R3       + ...\n              + \u03b3 * (1 - \u03bb) V2     + \u03b3^2 * \u03bb * (1 - \u03bb) V3 + ...\n\n        Args:\n            values: Tensor of shape (batch_size, response_size)\n            rewards: Tensor of shape (batch_size, response_size)\n            response_length: Length of the response sequence\n            use_whitening: Whether to use whitening (i.e. normalize advantages) or not\n        \"\"\"\n        lastgaelam = 0\n        advantages_reversed = []\n        for t in reversed(range(response_length)):\n            nextvalues = values[:, t + 1] if t < response_length - 1 else 0.0\n            delta = rewards[:, t] + self.gamma * nextvalues - values[:, t]\n            lastgaelam = delta + self.gamma * self.lam * lastgaelam\n            advantages_reversed.append(lastgaelam)\n        advantages = torch.stack(advantages_reversed[::-1], dim=1)\n        returns = advantages + values\n        if use_whitening:\n            advantages = whiten(advantages)\n        return advantages.detach(), returns\n\n    def loss(\n        self,\n        logprobs: TensorType[\"batch_size\", \"response_size\"],\n        values: TensorType[\"batch_size\", \"response_size\"],\n        old_logprobs: TensorType[\"batch_size\", \"response_size\"],\n        old_values: TensorType[\"batch_size\", \"response_size\"],\n        advantages: TensorType[\"batch_size\", \"response_size\"],\n        returns: TensorType[\"batch_size\", \"response_size\"],\n        mask: TensorType[\"batch_size\", \"response_size\"],\n    ):\n        \"\"\"PPO objective function.\n        References:\n        - https://stable-baselines.readthedocs.io/en/master/modules/ppo2.html\n        \"\"\"\n        values_clipped = torch.clamp(\n            values,\n            old_values - self.cliprange_value,\n            old_values + self.cliprange_value,\n        )\n        n = mask.sum()\n\n        vf_loss1 = (values - returns) ** 2\n        vf_loss2 = (values_clipped - returns) ** 2\n        vf_loss = 0.5 * torch.sum(torch.max(vf_loss1, vf_loss2) * mask) / n\n        vf_clipfrac = torch.sum((vf_loss2 > vf_loss1).float() * mask) / n\n\n        log_ratio = (logprobs - old_logprobs) * mask\n        ratio = torch.exp(log_ratio)\n        # Unbiased KL-div estimates (`k3`). 
Ref: http://joschu.net/blog/kl-approx.html\n with torch.no_grad():\n approx_kl = torch.mean((ratio - 1) - log_ratio)\n\n pg_loss1 = -advantages * ratio\n pg_loss2 = -advantages * torch.clamp(\n ratio,\n 1.0 - self.cliprange,\n 1.0 + self.cliprange,\n )\n pg_loss = torch.sum(torch.max(pg_loss1, pg_loss2) * mask) / n\n pg_clipfrac = torch.sum((pg_loss2 > pg_loss1).float() * mask) / n\n\n loss = pg_loss + self.vf_coef * vf_loss\n\n stats = dict(\n losses=dict(\n total_loss=loss.item(),\n policy_loss=pg_loss.item(),\n value_loss=vf_loss.item(),\n ),\n values=dict(\n get_tensor_stats(values, mask, n),\n values_error=torch.sum(((values - returns) * mask) ** 2) / n,\n clipfrac=vf_clipfrac,\n ),\n old_values=get_tensor_stats(old_values, mask, n),\n returns=get_tensor_stats(returns, mask, n),\n policy=dict(approx_kl=approx_kl.item(), clipfrac=pg_clipfrac.item()),\n ratio=(ratio * mask).sum() / n,\n padding_percentage=n / mask.numel(),\n )\n\n return loss, flatten_dict(stats)\n\n\n# CausalLM architectures\n\n\n@dataclass\nclass CausalLMOutputWithValue(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n value: Optional[torch.FloatTensor] = None\n\n\nclass AutoModelForCausalLMWithValueHead(PreTrainedModelWrapper):\n \"\"\"An `AutoModel` class wrapper for `transformers` causal models that have a\n language modeling head and a value head\n \"\"\"\n\n _auto_model_parent_class = transformers.AutoModelForCausalLM\n _supported_modules = [\"v_head\"]\n _supported_args = []\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n ):\n super().__init__(base_model)\n self.v_head = make_head(hf_get_hidden_size(self.base_model.config), 1)\n\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n position_ids: Optional[List[torch.FloatTensor]] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithValue]:\n forward_kwargs = self.get_compatible_forward_kwargs(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n forward_kwargs[\"output_hidden_states\"] = True\n forward_kwargs[\"return_dict\"] = True\n\n outputs = self.base_model(**forward_kwargs)\n value = self.v_head(outputs.hidden_states[-1]).squeeze(-1)\n\n if not return_dict:\n outputs = (outputs.logits,) + outputs[1:] + (value,)\n return outputs\n\n return CausalLMOutputWithValue(**outputs, value=value)\n\n def generate(self, *args, **kwargs) -> Union[ModelOutput, torch.LongTensor]:\n return self.base_model.generate(*args, **kwargs)\n\n def state_dict(self, *args, **kwargs):\n \"\"\"\n Returns the state dictionary of the model. 
We add the state dictionary of the value head\n        to the state dictionary of the wrapped model by prepending the key with `v_head.`.\n        \"\"\"\n        base_model_state_dict = self.base_model.state_dict(*args, **kwargs)\n        v_head_state_dict = self.v_head.state_dict(*args, **kwargs)\n        for k, v in v_head_state_dict.items():\n            base_model_state_dict[f\"v_head.{k}\"] = v\n        return base_model_state_dict\n\n    def post_init(self, state_dict):\n        \"\"\"\n        Adds the state dictionary of the value head to the state dictionary of the wrapped model\n        by prepending the key with `v_head.`. This function removes the `v_head.` prefix from the\n        keys of the value head state dictionary.\n        \"\"\"\n        for k in list(state_dict.keys()):\n            if \"v_head.\" in k:\n                state_dict[k.replace(\"v_head.\", \"\")] = state_dict.pop(k)\n        self.v_head.load_state_dict(state_dict, strict=False)\n        del state_dict\n        gc.collect()  # noqa: E702\n\n\nclass AutoModelForCausalLMWithHydraValueHead(AutoModelForCausalLMWithValueHead):\n    _supported_modules = [\"v_head\", \"frozen_head\"]\n    _supported_args = [\"num_layers_unfrozen\"]\n\n    def __init__(\n        self,\n        base_model: transformers.PreTrainedModel,\n        *,\n        num_layers_unfrozen: int = -1,\n    ):", "metadata": {"task_id": "CarperAI--trlx/30", "ground_truth": "        super().__init__(base_model)\n        self.num_layers_unfrozen = num_layers_unfrozen\n        if self.num_layers_unfrozen > 0:\n            config = self.base_model.config\n            branch_class = hf_get_branch_class(config)\n            self.frozen_head = branch_class(\n                self.base_model,\n                num_layers_unfrozen=self.num_layers_unfrozen,\n            ).eval()\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_ppo.py"], "context_start_lineno": 0, "lineno": 340, "function_name": "__init__"}, "groundtruth": "        super().__init__(base_model)\n        self.num_layers_unfrozen = num_layers_unfrozen\n        if self.num_layers_unfrozen > 0:\n            config = self.base_model.config\n            branch_class = hf_get_branch_class(config)\n            self.frozen_head = branch_class(\n                self.base_model,\n                num_layers_unfrozen=self.num_layers_unfrozen,\n            ).eval()\n"}
+{"prompt": " n_steps: int):\n        \"\"\"Returns updated KL coefficient, \u03b2\u209c\u208a\u2081.\n        Arguments:\n            current: The current KL value between the newest policy and the initial policy.\n        \"\"\"\n        pass\n\n\n# PPO Configs\n\n\n@dataclass\n@register_method\nclass PPOConfig(MethodConfig):\n    \"\"\"\n    Config for PPO method\n\n    :param ppo_epochs: Number of updates per batch\n    :type ppo_epochs: int\n\n    :param num_rollouts: Number of experiences to observe before learning\n    :type num_rollouts: int\n\n    :param init_kl_coef: Initial value for KL coefficient\n    :type init_kl_coef: float\n\n    :param target: Target value for KL coefficient\n    :type target: float\n\n    :param horizon: Number of steps for KL coefficient to reach target\n    :type horizon: int\n\n    :param gamma: Discount factor\n    :type gamma: float\n\n    :param lam: GAE lambda\n    :type lam: float\n\n    :param cliprange: Clipping range for PPO policy loss (1 - cliprange, 1 + cliprange)\n    :type cliprange: float\n\n    :param cliprange_value: Clipping range for predicted values\n        (observed values - cliprange_value, observed values + cliprange_value)\n    :type cliprange_value: float\n\n    :param vf_coef: Value loss scale w.r.t policy loss\n    :type vf_coef: float\n\n    :param gen_kwargs: Additional kwargs for the generation\n    :type gen_kwargs: Dict[str, Any]\n\n    :param gen_experience_kwargs: if this is not None, then the experience is generated using this\n    :type gen_experience_kwargs: Dict[str, Any]\n    \"\"\"\n\n    ppo_epochs: int\n    num_rollouts: int\n    chunk_size: int\n    
init_kl_coef: float\n    target: float\n    horizon: int\n    gamma: float\n    lam: float\n    cliprange: float\n    cliprange_value: float\n    vf_coef: float\n    scale_reward: str\n    ref_mean: Optional[float]\n    ref_std: Optional[float]\n    cliprange_reward: float\n    gen_kwargs: dict\n    gen_experience_kwargs: Optional[dict] = None\n\n    def get_advantages_and_returns(\n        self,\n        values: TensorType[\"batch_size\", \"response_size\"],\n        rewards: TensorType[\"batch_size\", \"response_size\"],\n        response_length: int,\n        use_whitening: Optional[bool] = True,\n    ) -> Tuple[torch.Tensor, torch.Tensor]:\n        \"\"\"Function that computes advantages and returns from rewards and values.\n        Calculated as in the original PPO paper: https://arxiv.org/abs/1707.06347\n        Note that rewards may include a KL divergence loss term.\n\n        Advantages looks like this:\n        Adv1 =  R1 + \u03b3 * \u03bb * R2     + \u03b3^2 * \u03bb^2 * R3       + ...\n              - V1 + \u03b3 * (1 - \u03bb) V2 + \u03b3^2 * \u03bb * (1 - \u03bb) V3 + ...\n\n        Returns looks like this:\n        Ret1 =  R1 + \u03b3 * \u03bb * R2     + \u03b3^2 * \u03bb^2 * R3       + ...\n              + \u03b3 * (1 - \u03bb) V2     + \u03b3^2 * \u03bb * (1 - \u03bb) V3 + ...\n\n        Args:\n            values: Tensor of shape (batch_size, response_size)\n            rewards: Tensor of shape (batch_size, response_size)\n            response_length: Length of the response sequence\n            use_whitening: Whether to use whitening (i.e. normalize advantages) or not\n        \"\"\"\n        lastgaelam = 0\n        advantages_reversed = []\n        for t in reversed(range(response_length)):\n            nextvalues = values[:, t + 1] if t < response_length - 1 else 0.0\n            delta = rewards[:, t] + self.gamma * nextvalues - values[:, t]\n            lastgaelam = delta + self.gamma * self.lam * lastgaelam\n            advantages_reversed.append(lastgaelam)\n        advantages = torch.stack(advantages_reversed[::-1], dim=1)\n        returns = advantages + values\n        if use_whitening:\n            advantages = whiten(advantages)\n        return advantages.detach(), returns\n\n    def loss(\n        self,\n        logprobs: TensorType[\"batch_size\", \"response_size\"],\n        values: TensorType[\"batch_size\", \"response_size\"],\n        old_logprobs: TensorType[\"batch_size\", \"response_size\"],\n        old_values: TensorType[\"batch_size\", \"response_size\"],\n        advantages: TensorType[\"batch_size\", \"response_size\"],\n        returns: TensorType[\"batch_size\", \"response_size\"],\n        mask: TensorType[\"batch_size\", \"response_size\"],\n    ):\n        \"\"\"PPO objective function.\n        References:\n        - https://stable-baselines.readthedocs.io/en/master/modules/ppo2.html\n        \"\"\"\n        values_clipped = torch.clamp(\n            values,\n            old_values - self.cliprange_value,\n            old_values + self.cliprange_value,\n        )\n        n = mask.sum()\n\n        vf_loss1 = (values - returns) ** 2\n        vf_loss2 = (values_clipped - returns) ** 2\n        vf_loss = 0.5 * torch.sum(torch.max(vf_loss1, vf_loss2) * mask) / n\n        vf_clipfrac = torch.sum((vf_loss2 > vf_loss1).float() * mask) / n\n\n        log_ratio = (logprobs - old_logprobs) * mask\n        ratio = torch.exp(log_ratio)\n        # Unbiased KL-div estimates (`k3`). 
Ref: http://joschu.net/blog/kl-approx.html\n with torch.no_grad():\n approx_kl = torch.mean((ratio - 1) - log_ratio)\n\n pg_loss1 = -advantages * ratio\n pg_loss2 = -advantages * torch.clamp(\n ratio,\n 1.0 - self.cliprange,\n 1.0 + self.cliprange,\n )\n pg_loss = torch.sum(torch.max(pg_loss1, pg_loss2) * mask) / n\n pg_clipfrac = torch.sum((pg_loss2 > pg_loss1).float() * mask) / n\n\n loss = pg_loss + self.vf_coef * vf_loss\n\n stats = dict(\n losses=dict(\n total_loss=loss.item(),\n policy_loss=pg_loss.item(),\n value_loss=vf_loss.item(),\n ),\n values=dict(\n get_tensor_stats(values, mask, n),\n values_error=torch.sum(((values - returns) * mask) ** 2) / n,\n clipfrac=vf_clipfrac,\n ),\n old_values=get_tensor_stats(old_values, mask, n),\n returns=get_tensor_stats(returns, mask, n),\n policy=dict(approx_kl=approx_kl.item(), clipfrac=pg_clipfrac.item()),\n ratio=(ratio * mask).sum() / n,\n padding_percentage=n / mask.numel(),\n )\n\n return loss, flatten_dict(stats)\n\n\n# CausalLM architectures\n\n\n@dataclass\nclass CausalLMOutputWithValue(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n value: Optional[torch.FloatTensor] = None\n\n\nclass AutoModelForCausalLMWithValueHead(PreTrainedModelWrapper):\n \"\"\"An `AutoModel` class wrapper for `transformers` causal models that have a\n language modeling head and a value head\n \"\"\"\n\n _auto_model_parent_class = transformers.AutoModelForCausalLM\n _supported_modules = [\"v_head\"]\n _supported_args = []\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n ):\n super().__init__(base_model)\n self.v_head = make_head(hf_get_hidden_size(self.base_model.config), 1)\n\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n position_ids: Optional[List[torch.FloatTensor]] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithValue]:\n forward_kwargs = self.get_compatible_forward_kwargs(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n forward_kwargs[\"output_hidden_states\"] = True\n forward_kwargs[\"return_dict\"] = True\n\n outputs = self.base_model(**forward_kwargs)\n value = self.v_head(outputs.hidden_states[-1]).squeeze(-1)\n\n if not return_dict:\n outputs = (outputs.logits,) + outputs[1:] + (value,)\n return outputs\n\n return CausalLMOutputWithValue(**outputs, value=value)\n\n def generate(self, *args, **kwargs) -> Union[ModelOutput, torch.LongTensor]:\n return self.base_model.generate(*args, **kwargs)\n\n def state_dict(self, *args, **kwargs):\n \"\"\"\n Returns the state dictionary of the model. 
We add the state dictionary of the value head\n to the state dictionary of the wrapped model by prepending the key with `v_head.`.\n \"\"\"\n base_model_state_dict = self.base_model.state_dict(*args, **kwargs)\n v_head_state_dict = self.v_head.state_dict(*args, **kwargs)\n for k, v in v_head_state_dict.items():\n base_model_state_dict[f\"v_head.{k}\"] = v\n return base_model_state_dict\n\n def post_init(self, state_dict):\n \"\"\"\n Adds the state dictionary of the value head to the state dictionary of the wrapped model\n by prepending the key with `v_head.`. This function removes the `v_head.` prefix from the\n keys of the value head state dictionary.\n \"\"\"\n for k in list(state_dict.keys()):\n if \"v_head.\" in k:\n state_dict[k.replace(\"v_head.\", \"\")] = state_dict.pop(k)\n self.v_head.load_state_dict(state_dict, strict=False)\n del state_dict\n gc.collect() # noqa: E702\n\n\nclass AutoModelForCausalLMWithHydraValueHead(AutoModelForCausalLMWithValueHead):\n _supported_modules = [\"v_head\", \"frozen_head\"]\n _supported_args = [\"num_layers_unfrozen\"]\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n *,\n num_layers_unfrozen: int = -1,\n ):\n super().__init__(base_model)\n self.num_layers_unfrozen = num_layers_unfrozen\n if self.num_layers_unfrozen > 0:\n config = self.base_model.config\n branch_class = hf_get_branch_class(config)\n self.frozen_head = branch_class(\n self.base_model,\n num_layers_unfrozen=self.num_layers_unfrozen,\n ).eval()\n\n def forward_hydra(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n position_ids: Optional[List[torch.FloatTensor]] = None,\n head_mask: Optional[torch.Tensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[torch.FloatTensor, CausalLMOutputWithValue]:\n forward_kwargs = self.get_compatible_forward_kwargs(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n return_dict = forward_kwargs.get(\"return_dict\", True)\n forward_kwargs[\"return_dict\"] = True\n forward_kwargs[\"output_hidden_states\"] = True\n\n outputs = self.forward(**forward_kwargs)\n # Select the hidden state before the first branching layer\n input_hidden_state = outputs.hidden_states[-(self.num_layers_unfrozen + 1)]\n\n output_shape = outputs.hidden_states[-1].size()\n forward_kwargs.pop(\"input_ids\", None) # Ignore `input_ids` for branch head\n forward_kwargs.pop(\"inputs_embeds\", None) # Ignore `inputs_embeds` for branch head\n hydra_outputs = self.frozen_head(input_hidden_state, output_shape, **forward_kwargs)\n\n if not return_dict:\n return hydra_outputs.logits\n return hydra_outputs\n\n\nclass ModelBranch(transformers.PreTrainedModel):\n \"\"\"Implements the frozen upper trunk of the pretrained reference model used\n when computing the PPO KL-divergence penalty.\n \"\"\"\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n *,\n num_layers_unfrozen: int,\n ):\n \"\"\"\n Args:\n base_model (transformers.PreTrainedModel): The pretrained model to extract upper trunk from\n 
num_layers_unfrozen (int): The number of trainable layers\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/31", "ground_truth": " super().__init__(base_model.config)\n\n # The branch is defined by the last `num_layers_unfrozen` layers of the pretrained model\n decoder_blocks = deepcopy(hf_get_decoder_blocks(base_model))\n self.decoder_blocks = nn.ModuleList(list(decoder_blocks)[-num_layers_unfrozen:])\n self.final_norm = deepcopy(hf_get_decoder_final_norm(base_model))\n self.lm_head = deepcopy(hf_get_lm_head(base_model))\n\n self.hidden_size = hf_get_hidden_size(self.config)\n self.model_parallel = False\n self.device_map = None\n self.last_device = None\n self.gradient_checkpointing = False\n\n # Freeze the entire branch\n for parameter in self.parameters():\n parameter.requires_grad_(False)\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_ppo.py"], "context_start_lineno": 60, "lineno": 409, "function_name": "__init__"}, "groundtruth": " super().__init__(base_model.config)\n\n # The branch is defined by the last `num_layers_unfrozen` layers of the pretrained model\n decoder_blocks = deepcopy(hf_get_decoder_blocks(base_model))\n self.decoder_blocks = nn.ModuleList(list(decoder_blocks)[-num_layers_unfrozen:])\n self.final_norm = deepcopy(hf_get_decoder_final_norm(base_model))\n self.lm_head = deepcopy(hf_get_lm_head(base_model))\n\n self.hidden_size = hf_get_hidden_size(self.config)\n self.model_parallel = False\n self.device_map = None\n self.last_device = None\n self.gradient_checkpointing = False\n\n # Freeze the entire branch\n for parameter in self.parameters():\n parameter.requires_grad_(False)\n"} +{"prompt": "dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0\n\n if attention_mask is None:\n attention_mask = torch.ones(hidden_states.shape[:2], dtype=torch.bool, device=hidden_states.device)\n\n input_shape = hidden_states.size()[:-1]\n combined_attention_mask = None\n if input_shape[-1] > 1:\n combined_attention_mask = modeling_opt._make_causal_mask(\n input_shape,\n hidden_states.dtype,\n past_key_values_length=past_key_values_length,\n ).to(hidden_states.device)\n\n if attention_mask is not None:\n expanded_attn_mask = modeling_opt._expand_mask(\n attention_mask, hidden_states.dtype, tgt_len=input_shape[-1]\n ).to(hidden_states.device)\n combined_attention_mask = (\n expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask\n )\n attention_mask = combined_attention_mask\n\n all_hidden_states = () if output_hidden_states else None\n all_self_attns = () if output_attentions else None\n next_decoder_cache = () if use_cache else None\n\n for attn_mask, mask_name in zip([head_mask], [\"head_mask\"]):\n if attn_mask is not None:\n if attn_mask.size()[0] != (len(self.decoder_blocks)):\n raise ValueError(\n f\"The `{mask_name}` should be specified for {len(self.decoder_blocks)} layers, but it is for\"\n f\" {head_mask.size()[0]}.\"\n )\n\n for idx, decoder_layer in enumerate(self.decoder_blocks):\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n\n past_key_value = past_key_values[idx] if past_key_values is not None else None\n\n layer_outputs = decoder_layer(\n hidden_states,\n past_key_value=past_key_value,\n attention_mask=attention_mask,\n layer_head_mask=(head_mask[idx] if head_mask is not None else None),\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n\n 
hidden_states = layer_outputs[0]\n\n if use_cache:\n next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)\n\n if output_attentions:\n all_self_attns += (layer_outputs[1],)\n\n if self.final_norm is not None:\n hidden_states = self.final_norm(hidden_states)\n\n # TODO: Add output projection support\n # https://github.com/huggingface/transformers/blob/699e90437f984d69ad3c9b891dd2e9d0fc2cffe4/src/transformers/models/opt/modeling_opt.py#L499 # noqa: E501\n # if self.project_out is not None:\n # hidden_states = self.project_out(hidden_states)\n\n if output_hidden_states:\n all_hidden_states += (hidden_states,)\n\n next_cache = next_decoder_cache if use_cache else None\n\n lm_logits = self.lm_head(hidden_states).contiguous()\n\n if not return_dict:\n return tuple(\n v\n for v in [\n lm_logits,\n hidden_states,\n next_cache,\n all_hidden_states,\n all_self_attns,\n ]\n if v is not None\n )\n\n return CausalLMOutputWithValue(\n logits=lm_logits,\n past_key_values=next_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attns,\n )\n\n\nclass BloomModelBranch(ModelBranch):\n def forward( # noqa: max-complexity\n self,\n hidden_states: torch.Tensor, # Takes as input hidden_states instead of input_ids\n output_shape: torch.Tensor,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = False,\n ) -> Union[Tuple, CausalLMOutputWithValue]:\n \"\"\"Reference:\n https://github.com/huggingface/transformers/blob/2411f0e465e761790879e605a4256f3d4afb7f82/src/transformers/models/bloom/modeling_bloom.py#L623 # noqa: E501\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n batch_size, seq_length = hidden_states.shape[:2]\n\n if past_key_values is None:\n past_key_values = tuple([None] * len(self.decoder_blocks))\n\n head_mask = self.get_head_mask(head_mask, hf_get_num_hidden_layers(self.config))\n\n presents = () if use_cache else None\n all_self_attentions = () if output_attentions else None\n all_hidden_states = () if output_hidden_states else None\n\n seq_length_with_past = seq_length\n past_key_values_length = 0\n if past_key_values[0] is not None:\n past_key_values_length = past_key_values[0][0].shape[2]\n seq_length_with_past = seq_length_with_past + past_key_values_length\n if attention_mask is None:\n attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)\n else:\n attention_mask = attention_mask.to(hidden_states.device)\n\n alibi = modeling_bloom.build_alibi_tensor(attention_mask, self.config.n_head, dtype=hidden_states.dtype)\n\n combined_attention_mask = None\n device = attention_mask.device\n input_shape = (batch_size, seq_length)\n _, src_length = input_shape\n\n if src_length > 1:\n combined_attention_mask = modeling_bloom._make_causal_mask(\n 
input_shape,\n device=device,\n past_key_values_length=past_key_values_length,\n )\n\n expanded_attn_mask = modeling_bloom._expand_mask(attention_mask, tgt_length=src_length)\n combined_attention_mask = (\n expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask\n )\n causal_mask = combined_attention_mask\n\n for i, (block, layer_past) in enumerate(zip(self.decoder_blocks, past_key_values)):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = block(\n hidden_states,\n layer_past=layer_past,\n attention_mask=causal_mask,\n head_mask=head_mask[i],\n use_cache=use_cache,\n output_attentions=output_attentions,\n alibi=alibi,\n )\n\n hidden_states = outputs[0]\n if use_cache is True:\n presents = presents + (outputs[1],)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)\n\n hidden_states = self.final_norm(hidden_states)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n lm_logits = self.lm_head(hidden_states)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n lm_logits,\n hidden_states,\n presents,\n all_hidden_states,\n all_self_attentions,\n ]\n if v is not None\n )\n\n return CausalLMOutputWithValue(\n logits=lm_logits,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\n# Seq2Seq architectures\n\n\n@dataclass\nclass Seq2SeqLMOutputWithValue(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n value: Optional[torch.FloatTensor] = None\n\n\nclass AutoModelForSeq2SeqLMWithValueHead(PreTrainedModelWrapper):\n \"\"\"An `AutoModel` class wrapper for `transformers` sequence-to-sequence\n models that have a language modeling head and a value head\n \"\"\"\n\n _auto_model_parent_class = transformers.AutoModelForSeq2SeqLM\n _supported_modules = [\"v_head\"]\n _supported_args = []\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n ):\n super().__init__(base_model)\n self.v_head = make_head(hf_get_hidden_size(self.base_model.config), 1)\n\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.FloatTensor] = None,\n encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,\n past_key_values: Optional[Tuple[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n decoder_head_mask: Optional[torch.FloatTensor] = None,\n cross_attn_head_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = True,\n output_hidden_states: Optional[bool] = True,\n return_dict: Optional[bool] = None,\n ) -> Seq2SeqLMOutputWithValue:\n forward_kwargs = self.get_compatible_forward_kwargs(\n 
input_ids=input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n encoder_outputs=encoder_outputs,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n forward_kwargs[\"output_hidden_states\"] = True\n forward_kwargs[\"return_dict\"] = True\n\n outputs = self.base_model(**forward_kwargs)\n last_hidden_state = outputs.decoder_hidden_states[-1]\n value = self.v_head(last_hidden_state).squeeze(-1)\n\n return Seq2SeqLMOutputWithValue(**outputs, value=value)\n\n def generate(self, *args, **kwargs) -> Union[ModelOutput, torch.LongTensor]:\n return self.base_model.generate(*args, **kwargs)\n\n def state_dict(self, *args, **kwargs):\n \"\"\"\n Returns the state dictionary of the model. We add the state dictionary of the value head\n to the state dictionary of the wrapped model by prepending the key with `v_head.`.\n \"\"\"\n base_model_state_dict = self.base_model.state_dict(*args, **kwargs)\n v_head_state_dict = self.v_head.state_dict(*args, **kwargs)\n for k, v in v_head_state_dict.items():\n base_model_state_dict[f\"v_head.{k}\"] = v\n return base_model_state_dict\n\n def post_init(self, state_dict):\n \"\"\"\n We add the state dictionary of the value head to the state dictionary of the wrapped model\n by prepending the key with `v_head.`. This function removes the `v_head.` prefix from the\n keys of the value head state dictionary.\n \"\"\"\n for k in list(state_dict.keys()):\n if \"v_head.\" in k:\n state_dict[k.replace(\"v_head.\", \"\")] = state_dict.pop(k)\n self.v_head.load_state_dict(state_dict, strict=False)\n del state_dict\n gc.collect() # noqa: E702\n\n\nclass AutoModelForSeq2SeqLMWithHydraValueHead(AutoModelForSeq2SeqLMWithValueHead):\n _supported_modules = [\"v_head\", \"frozen_head\"]\n _supported_args = [\"num_layers_unfrozen\"]\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n *,\n num_layers_unfrozen: int = -1,\n ):", "metadata": {"task_id": "CarperAI--trlx/32", "ground_truth": " super().__init__(base_model)\n self.num_layers_unfrozen = num_layers_unfrozen\n if self.num_layers_unfrozen > 0:\n branch_class = T5Branch # TODO: Add support for other model branches\n self.frozen_head = branch_class(\n self.base_model,\n num_layers_unfrozen=self.num_layers_unfrozen,\n ).eval()\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_ppo.py"], "context_start_lineno": 586, "lineno": 913, "function_name": "__init__"}, "groundtruth": " super().__init__(base_model)\n self.num_layers_unfrozen = num_layers_unfrozen\n if self.num_layers_unfrozen > 0:\n branch_class = T5Branch # TODO: Add support for other model branches\n self.frozen_head = branch_class(\n self.base_model,\n num_layers_unfrozen=self.num_layers_unfrozen,\n ).eval()\n"} +{"prompt": " next_cache,\n all_hidden_states,\n all_self_attns,\n ]\n if v is not None\n )\n\n return CausalLMOutputWithValue(\n logits=lm_logits,\n past_key_values=next_cache,\n hidden_states=all_hidden_states,\n attentions=all_self_attns,\n )\n\n\nclass BloomModelBranch(ModelBranch):\n def forward( # noqa: max-complexity\n self,\n hidden_states: torch.Tensor, # Takes as input hidden_states instead of input_ids\n output_shape: torch.Tensor,\n 
past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = False,\n ) -> Union[Tuple, CausalLMOutputWithValue]:\n \"\"\"Reference:\n https://github.com/huggingface/transformers/blob/2411f0e465e761790879e605a4256f3d4afb7f82/src/transformers/models/bloom/modeling_bloom.py#L623 # noqa: E501\n \"\"\"\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n batch_size, seq_length = hidden_states.shape[:2]\n\n if past_key_values is None:\n past_key_values = tuple([None] * len(self.decoder_blocks))\n\n head_mask = self.get_head_mask(head_mask, hf_get_num_hidden_layers(self.config))\n\n presents = () if use_cache else None\n all_self_attentions = () if output_attentions else None\n all_hidden_states = () if output_hidden_states else None\n\n seq_length_with_past = seq_length\n past_key_values_length = 0\n if past_key_values[0] is not None:\n past_key_values_length = past_key_values[0][0].shape[2]\n seq_length_with_past = seq_length_with_past + past_key_values_length\n if attention_mask is None:\n attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)\n else:\n attention_mask = attention_mask.to(hidden_states.device)\n\n alibi = modeling_bloom.build_alibi_tensor(attention_mask, self.config.n_head, dtype=hidden_states.dtype)\n\n combined_attention_mask = None\n device = attention_mask.device\n input_shape = (batch_size, seq_length)\n _, src_length = input_shape\n\n if src_length > 1:\n combined_attention_mask = modeling_bloom._make_causal_mask(\n input_shape,\n device=device,\n past_key_values_length=past_key_values_length,\n )\n\n expanded_attn_mask = modeling_bloom._expand_mask(attention_mask, tgt_length=src_length)\n combined_attention_mask = (\n expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask | combined_attention_mask\n )\n causal_mask = combined_attention_mask\n\n for i, (block, layer_past) in enumerate(zip(self.decoder_blocks, past_key_values)):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n outputs = block(\n hidden_states,\n layer_past=layer_past,\n attention_mask=causal_mask,\n head_mask=head_mask[i],\n use_cache=use_cache,\n output_attentions=output_attentions,\n alibi=alibi,\n )\n\n hidden_states = outputs[0]\n if use_cache is True:\n presents = presents + (outputs[1],)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)\n\n hidden_states = self.final_norm(hidden_states)\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n lm_logits = self.lm_head(hidden_states)\n\n if not return_dict:\n return tuple(\n v\n for v in [\n lm_logits,\n hidden_states,\n presents,\n all_hidden_states,\n all_self_attentions,\n ]\n if v is not 
None\n )\n\n return CausalLMOutputWithValue(\n logits=lm_logits,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\n# Seq2Seq architectures\n\n\n@dataclass\nclass Seq2SeqLMOutputWithValue(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n value: Optional[torch.FloatTensor] = None\n\n\nclass AutoModelForSeq2SeqLMWithValueHead(PreTrainedModelWrapper):\n \"\"\"An `AutoModel` class wrapper for `transformers` sequence-to-sequence\n models that have a language modeling head and a value head\n \"\"\"\n\n _auto_model_parent_class = transformers.AutoModelForSeq2SeqLM\n _supported_modules = [\"v_head\"]\n _supported_args = []\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n ):\n super().__init__(base_model)\n self.v_head = make_head(hf_get_hidden_size(self.base_model.config), 1)\n\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.FloatTensor] = None,\n encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,\n past_key_values: Optional[Tuple[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n decoder_head_mask: Optional[torch.FloatTensor] = None,\n cross_attn_head_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = True,\n output_hidden_states: Optional[bool] = True,\n return_dict: Optional[bool] = None,\n ) -> Seq2SeqLMOutputWithValue:\n forward_kwargs = self.get_compatible_forward_kwargs(\n input_ids=input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n encoder_outputs=encoder_outputs,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n forward_kwargs[\"output_hidden_states\"] = True\n forward_kwargs[\"return_dict\"] = True\n\n outputs = self.base_model(**forward_kwargs)\n last_hidden_state = outputs.decoder_hidden_states[-1]\n value = self.v_head(last_hidden_state).squeeze(-1)\n\n return Seq2SeqLMOutputWithValue(**outputs, value=value)\n\n def generate(self, *args, **kwargs) -> Union[ModelOutput, torch.LongTensor]:\n return self.base_model.generate(*args, **kwargs)\n\n def state_dict(self, *args, **kwargs):\n \"\"\"\n Returns the state dictionary of the model. 
We add the state dictionary of the value head\n to the state dictionary of the wrapped model by prepending the key with `v_head.`.\n \"\"\"\n base_model_state_dict = self.base_model.state_dict(*args, **kwargs)\n v_head_state_dict = self.v_head.state_dict(*args, **kwargs)\n for k, v in v_head_state_dict.items():\n base_model_state_dict[f\"v_head.{k}\"] = v\n return base_model_state_dict\n\n def post_init(self, state_dict):\n \"\"\"\n We add the state dictionary of the value head to the state dictionary of the wrapped model\n by prepending the key with `v_head.`. This function removes the `v_head.` prefix from the\n keys of the value head state dictionary.\n \"\"\"\n for k in list(state_dict.keys()):\n if \"v_head.\" in k:\n state_dict[k.replace(\"v_head.\", \"\")] = state_dict.pop(k)\n self.v_head.load_state_dict(state_dict, strict=False)\n del state_dict\n gc.collect() # noqa: E702\n\n\nclass AutoModelForSeq2SeqLMWithHydraValueHead(AutoModelForSeq2SeqLMWithValueHead):\n _supported_modules = [\"v_head\", \"frozen_head\"]\n _supported_args = [\"num_layers_unfrozen\"]\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n *,\n num_layers_unfrozen: int = -1,\n ):\n super().__init__(base_model)\n self.num_layers_unfrozen = num_layers_unfrozen\n if self.num_layers_unfrozen > 0:\n branch_class = T5Branch # TODO: Add support for other model branches\n self.frozen_head = branch_class(\n self.base_model,\n num_layers_unfrozen=self.num_layers_unfrozen,\n ).eval()\n\n def forward_hydra(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.FloatTensor] = None,\n encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,\n past_key_values: Optional[Tuple[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n decoder_head_mask: Optional[torch.FloatTensor] = None,\n cross_attn_head_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Seq2SeqLMOutputWithValue:\n forward_kwargs = self.get_compatible_forward_kwargs(\n input_ids=input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n encoder_outputs=encoder_outputs,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n return_dict = forward_kwargs.get(\"return_dict\", True)\n forward_kwargs[\"output_hidden_states\"] = True\n forward_kwargs[\"return_dict\"] = True\n\n outputs = self.forward(**forward_kwargs)\n # Select the hidden state before the first branching layer\n input_hidden_state = outputs.decoder_hidden_states[-(self.num_layers_unfrozen + 1)]\n hydra_outputs = self.frozen_head(\n hidden_states=input_hidden_state,\n attention_mask=decoder_attention_mask,\n encoder_hidden_states=outputs.encoder_last_hidden_state,\n encoder_attention_mask=attention_mask,\n use_cache=False,\n output_attentions=False,\n 
output_hidden_states=True,\n return_dict=return_dict,\n )\n\n if not return_dict:\n return hydra_outputs.logits\n return hydra_outputs\n\n\nclass T5Branch(ModelBranch):\n \"\"\"Decoder only T5 branch\"\"\"\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n *,\n num_layers_unfrozen: int,\n ):", "metadata": {"task_id": "CarperAI--trlx/33", "ground_truth": " super().__init__(base_model, num_layers_unfrozen=num_layers_unfrozen)\n self.dropout = hf_get_decoder(base_model).dropout\n self.is_decoder = True\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_ppo.py"], "context_start_lineno": 667, "lineno": 989, "function_name": "__init__"}, "groundtruth": " super().__init__(base_model, num_layers_unfrozen=num_layers_unfrozen)\n self.dropout = hf_get_decoder(base_model).dropout\n self.is_decoder = True\n"} +{"prompt": "states=all_hidden_states,\n attentions=all_self_attentions,\n )\n\n\n# Seq2Seq architectures\n\n\n@dataclass\nclass Seq2SeqLMOutputWithValue(ModelOutput):\n loss: Optional[torch.FloatTensor] = None\n logits: Optional[torch.FloatTensor] = None\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None\n decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n cross_attentions: Optional[Tuple[torch.FloatTensor]] = None\n encoder_last_hidden_state: Optional[torch.FloatTensor] = None\n encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None\n encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None\n value: Optional[torch.FloatTensor] = None\n\n\nclass AutoModelForSeq2SeqLMWithValueHead(PreTrainedModelWrapper):\n \"\"\"An `AutoModel` class wrapper for `transformers` sequence-to-sequence\n models that have a language modeling head and a value head\n \"\"\"\n\n _auto_model_parent_class = transformers.AutoModelForSeq2SeqLM\n _supported_modules = [\"v_head\"]\n _supported_args = []\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n ):\n super().__init__(base_model)\n self.v_head = make_head(hf_get_hidden_size(self.base_model.config), 1)\n\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.FloatTensor] = None,\n encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,\n past_key_values: Optional[Tuple[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n decoder_head_mask: Optional[torch.FloatTensor] = None,\n cross_attn_head_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = True,\n output_hidden_states: Optional[bool] = True,\n return_dict: Optional[bool] = None,\n ) -> Seq2SeqLMOutputWithValue:\n forward_kwargs = self.get_compatible_forward_kwargs(\n input_ids=input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n encoder_outputs=encoder_outputs,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n 
return_dict=return_dict,\n )\n forward_kwargs[\"output_hidden_states\"] = True\n forward_kwargs[\"return_dict\"] = True\n\n outputs = self.base_model(**forward_kwargs)\n last_hidden_state = outputs.decoder_hidden_states[-1]\n value = self.v_head(last_hidden_state).squeeze(-1)\n\n return Seq2SeqLMOutputWithValue(**outputs, value=value)\n\n def generate(self, *args, **kwargs) -> Union[ModelOutput, torch.LongTensor]:\n return self.base_model.generate(*args, **kwargs)\n\n def state_dict(self, *args, **kwargs):\n \"\"\"\n Returns the state dictionary of the model. We add the state dictionary of the value head\n to the state dictionary of the wrapped model by prepending the key with `v_head.`.\n \"\"\"\n base_model_state_dict = self.base_model.state_dict(*args, **kwargs)\n v_head_state_dict = self.v_head.state_dict(*args, **kwargs)\n for k, v in v_head_state_dict.items():\n base_model_state_dict[f\"v_head.{k}\"] = v\n return base_model_state_dict\n\n def post_init(self, state_dict):\n \"\"\"\n We add the state dictionary of the value head to the state dictionary of the wrapped model\n by prepending the key with `v_head.`. This function removes the `v_head.` prefix from the\n keys of the value head state dictionary.\n \"\"\"\n for k in list(state_dict.keys()):\n if \"v_head.\" in k:\n state_dict[k.replace(\"v_head.\", \"\")] = state_dict.pop(k)\n self.v_head.load_state_dict(state_dict, strict=False)\n del state_dict\n gc.collect() # noqa: E702\n\n\nclass AutoModelForSeq2SeqLMWithHydraValueHead(AutoModelForSeq2SeqLMWithValueHead):\n _supported_modules = [\"v_head\", \"frozen_head\"]\n _supported_args = [\"num_layers_unfrozen\"]\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n *,\n num_layers_unfrozen: int = -1,\n ):\n super().__init__(base_model)\n self.num_layers_unfrozen = num_layers_unfrozen\n if self.num_layers_unfrozen > 0:\n branch_class = T5Branch # TODO: Add support for other model branches\n self.frozen_head = branch_class(\n self.base_model,\n num_layers_unfrozen=self.num_layers_unfrozen,\n ).eval()\n\n def forward_hydra(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.FloatTensor] = None,\n encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,\n past_key_values: Optional[Tuple[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n decoder_head_mask: Optional[torch.FloatTensor] = None,\n cross_attn_head_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Seq2SeqLMOutputWithValue:\n forward_kwargs = self.get_compatible_forward_kwargs(\n input_ids=input_ids,\n attention_mask=attention_mask,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n encoder_outputs=encoder_outputs,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n decoder_inputs_embeds=decoder_inputs_embeds,\n head_mask=head_mask,\n decoder_head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n return_dict = 
forward_kwargs.get(\"return_dict\", True)\n forward_kwargs[\"output_hidden_states\"] = True\n forward_kwargs[\"return_dict\"] = True\n\n outputs = self.forward(**forward_kwargs)\n # Select the hidden state before the first branching layer\n input_hidden_state = outputs.decoder_hidden_states[-(self.num_layers_unfrozen + 1)]\n hydra_outputs = self.frozen_head(\n hidden_states=input_hidden_state,\n attention_mask=decoder_attention_mask,\n encoder_hidden_states=outputs.encoder_last_hidden_state,\n encoder_attention_mask=attention_mask,\n use_cache=False,\n output_attentions=False,\n output_hidden_states=True,\n return_dict=return_dict,\n )\n\n if not return_dict:\n return hydra_outputs.logits\n return hydra_outputs\n\n\nclass T5Branch(ModelBranch):\n \"\"\"Decoder only T5 branch\"\"\"\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n *,\n num_layers_unfrozen: int,\n ):\n super().__init__(base_model, num_layers_unfrozen=num_layers_unfrozen)\n self.dropout = hf_get_decoder(base_model).dropout\n self.is_decoder = True\n\n def forward( # noqa: max-complexity\n self,\n hidden_states: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, Seq2SeqLMOutputWithValue]:\n \"\"\"Reference:\n https://github.com/huggingface/transformers/blob/bc21aaca789f1a366c05e8b5e111632944886393/src/transformers/models/t5/modeling_t5.py#L899 # noqa: E501\n \"\"\"\n batch_size, seq_length = hidden_states.shape[:2]\n input_shape = (batch_size, seq_length)\n\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if attention_mask is None:\n attention_mask = torch.ones(batch_size, seq_length, device=hidden_states.device)\n if self.is_decoder and encoder_attention_mask is None and encoder_hidden_states is not None:\n encoder_seq_length = encoder_hidden_states.shape[1]\n encoder_attention_mask = torch.ones(\n batch_size, encoder_seq_length, device=hidden_states.device, dtype=torch.long\n )\n\n extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)\n\n if self.is_decoder and encoder_hidden_states is not None:\n encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()\n encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)\n if encoder_attention_mask is None:\n encoder_attention_mask = torch.ones(encoder_hidden_shape, device=hidden_states.device)\n encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)\n else:\n encoder_extended_attention_mask = None\n\n all_hidden_states = () if output_hidden_states else None\n all_attentions = () if output_attentions else None\n position_bias = None\n encoder_decoder_position_bias = None\n\n for _, layer_module in enumerate(self.decoder_blocks):\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n layer_outputs = layer_module(\n hidden_states,\n attention_mask=extended_attention_mask,\n position_bias=position_bias,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_extended_attention_mask,\n 
encoder_decoder_position_bias=encoder_decoder_position_bias,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n\n if use_cache is False:\n layer_outputs = layer_outputs[:1] + (None,) + layer_outputs[1:]\n\n hidden_states, present_key_value_state = layer_outputs[:2]\n\n position_bias = layer_outputs[2]\n if self.is_decoder and encoder_hidden_states is not None:\n encoder_decoder_position_bias = layer_outputs[4 if output_attentions else 3]\n\n if output_attentions:\n all_attentions = all_attentions + (layer_outputs[3],)\n\n hidden_states = self.final_norm(hidden_states)\n hidden_states = self.dropout(hidden_states)\n\n # Add last layer\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n sequence_output = hidden_states\n\n if self.config.tie_word_embeddings:\n # Rescale output before projecting on vocab\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586 # noqa: E501\n sequence_output = sequence_output * (self.config.d_model**-0.5)\n\n lm_logits = self.lm_head(sequence_output)\n\n if not return_dict:\n return (lm_logits,)\n\n return Seq2SeqLMOutputWithValue(\n logits=lm_logits,\n decoder_hidden_states=all_hidden_states,\n decoder_attentions=all_attentions,\n )\n\n\n# Branch class utils\n\n\ndef hf_get_branch_class(\n config: transformers.PretrainedConfig,\n) -> \"ModelBranch\":\n \"\"\"Returns the model branch class for the given config.\"\"\"", "metadata": {"task_id": "CarperAI--trlx/34", "ground_truth": " gpt_branch_supported_archs = [\n \"GPTJForCausalLM\",\n \"GPT2LMHeadModel\",\n \"GPTNeoForCausalLM\",\n \"GPTNeoXForCausalLM\",\n ]\n opt_branch_supported_archs = [\"OPTForCausalLM\"]\n bloom_branch_supported_archs = [\"BloomModel\", \"BloomForCausalLM\"]\n arch = config.architectures[0]\n if arch in gpt_branch_supported_archs:\n return GPTModelBranch\n elif arch in opt_branch_supported_archs:\n return OPTModelBranch\n elif arch in bloom_branch_supported_archs:\n return BloomModelBranch\n else:\n all_supported_archs = sum(\n [\n gpt_branch_supported_archs,\n opt_branch_supported_archs,\n bloom_branch_supported_archs,\n ],\n [],\n )\n raise ValueError(\n f\"Unsupported architecture: `{arch}`. The following architectures are \"\n f\"available for model branching:\\n{all_supported_archs}\"\n )\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_ppo.py"], "context_start_lineno": 793, "lineno": 1099, "function_name": "hf_get_branch_class"}, "groundtruth": " gpt_branch_supported_archs = [\n \"GPTJForCausalLM\",\n \"GPT2LMHeadModel\",\n \"GPTNeoForCausalLM\",\n \"GPTNeoXForCausalLM\",\n ]\n opt_branch_supported_archs = [\"OPTForCausalLM\"]\n bloom_branch_supported_archs = [\"BloomModel\", \"BloomForCausalLM\"]\n arch = config.architectures[0]\n if arch in gpt_branch_supported_archs:\n return GPTModelBranch\n elif arch in opt_branch_supported_archs:\n return OPTModelBranch\n elif arch in bloom_branch_supported_archs:\n return BloomModelBranch\n else:\n all_supported_archs = sum(\n [\n gpt_branch_supported_archs,\n opt_branch_supported_archs,\n bloom_branch_supported_archs,\n ],\n [],\n )\n raise ValueError(\n f\"Unsupported architecture: `{arch}`. 
The following architectures are \"\n f\"available for model branching:\\n{all_supported_archs}\"\n )\n"} +{"prompt": "import gc\nimport os\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom functools import reduce\nfrom itertools import chain\n\nimport deepspeed # type: ignore\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport transformers\nfrom torch import nn\nfrom torchtyping import TensorType\n\nfrom trlx.data.ilql_types import ILQLBatch\nfrom trlx.data.method_configs import MethodConfig, register_method\nfrom trlx.models.modeling_base import PreTrainedModelWrapper\nfrom trlx.utils.modeling import (\n flatten_dict,\n get_tensor_stats,\n hf_get_hidden_size,\n hf_get_lm_head,\n make_head,\n)\n\n\ndef topk_mask(xs: torch.FloatTensor, k: int):", "metadata": {"task_id": "CarperAI--trlx/35", "ground_truth": " if k > xs.shape[-1]:\n return xs\n mintop = torch.topk(xs, k)[0][:, -1].unsqueeze(-1)\n return torch.where(xs < mintop, -np.inf * torch.ones_like(xs, dtype=xs.dtype), xs)\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_ilql.py"], "context_start_lineno": 0, "lineno": 28, "function_name": "topk_mask"}, "groundtruth": " if k > xs.shape[-1]:\n return xs\n mintop = torch.topk(xs, k)[0][:, -1].unsqueeze(-1)\n return torch.where(xs < mintop, -np.inf * torch.ones_like(xs, dtype=xs.dtype), xs)\n"} +{"prompt": "import gc\nimport os\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom functools import reduce\nfrom itertools import chain\n\nimport deepspeed # type: ignore\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport transformers\nfrom torch import nn\nfrom torchtyping import TensorType\n\nfrom trlx.data.ilql_types import ILQLBatch\nfrom trlx.data.method_configs import MethodConfig, register_method\nfrom trlx.models.modeling_base import PreTrainedModelWrapper\nfrom trlx.utils.modeling import (\n flatten_dict,\n get_tensor_stats,\n hf_get_hidden_size,\n hf_get_lm_head,\n make_head,\n)\n\n\ndef topk_mask(xs: torch.FloatTensor, k: int):\n if k > xs.shape[-1]:\n return xs\n mintop = torch.topk(xs, k)[0][:, -1].unsqueeze(-1)\n return torch.where(xs < mintop, -np.inf * torch.ones_like(xs, dtype=xs.dtype), xs)\n\n\ndef batched_index_select(\n x: TensorType[\"batch\", \"seq_len\", \"hidden\"],\n idxs: TensorType[\"batch\", \"index_len\"],\n dim: int,\n) -> TensorType[\"batch\", \"index_len\", \"hidden\"]:\n \"\"\"\n Gather vectors at idxs along dim from x\n \"\"\"\n idxs = idxs.unsqueeze(-1).expand(idxs.shape[0], idxs.shape[1], x.shape[-1])\n return x.gather(dim=dim, index=idxs)\n\n\n@dataclass\n@register_method\nclass ILQLConfig(MethodConfig):\n tau: float\n gamma: float\n cql_scale: float\n awac_scale: float\n alpha: float\n beta: float\n steps_for_target_q_sync: float\n two_qs: bool\n gen_kwargs: dict\n\n def loss(self, outputs, labels: ILQLBatch):\n logits, (qs, target_qs, vs) = outputs\n terminal_mask = labels.dones[:, :-1]\n n_nonterminal = max(1, terminal_mask.sum())\n\n actions = labels.input_ids[:, 1:].gather(dim=1, index=labels.actions_ixs).unsqueeze(-1)\n nactions = actions.shape[1]\n bsize, _, dsize = logits.shape\n\n Q = [q.gather(-1, actions).squeeze(-1) for q in qs]\n targetQs = [q.gather(-1, actions).squeeze(-1).detach() for q in target_qs]\n targetQ = reduce(torch.minimum, targetQs)\n\n # values of current states\n V = vs[:, :-1].squeeze()\n # values of next states\n Vnext = vs[:, 1:].squeeze() * labels.dones[:, 1:]\n # target to fit Q\n Q_ = labels.rewards + self.gamma * 
Vnext.detach()\n\n loss_qs = [((Qi - Q_) * terminal_mask).pow(2).sum() / n_nonterminal for Qi in Q]\n loss_q = sum(loss_qs)\n\n targetQ = targetQ.detach()\n\n loss_v = (\n (\n (targetQ >= V).int() * self.tau * (targetQ - V).pow(2)\n + (targetQ < V).int() * (1 - self.tau) * (targetQ - V).pow(2)\n )\n * terminal_mask\n ).sum() / n_nonterminal\n\n def cql_loss(q):\n loss = F.cross_entropy(q.reshape(-1, dsize), actions.reshape(-1), reduction=\"none\")\n loss = loss.reshape(bsize, nactions) * terminal_mask\n loss = loss.sum() / n_nonterminal\n return loss\n\n loss_cql = sum(cql_loss(q) for q in qs)\n\n # select logits from continuations\n action_logits = batched_index_select(logits, labels.actions_ixs, dim=1)\n cross_entropy = F.cross_entropy(\n action_logits.reshape(-1, dsize),\n actions.reshape(-1),\n reduction=\"none\",\n ).reshape(bsize, nactions)\n\n with torch.no_grad():\n awac_weight = torch.exp(self.beta * (targetQ - V))\n\n loss_awac = torch.sum(cross_entropy * awac_weight * terminal_mask) / n_nonterminal\n loss = loss_q + loss_v + self.cql_scale * loss_cql + self.awac_scale * loss_awac\n\n stats = dict(\n losses=dict(\n loss=loss.item(),\n loss_q=loss_q.item(),\n loss_v=loss_v.item(),\n loss_cql=loss_cql.item(),\n loss_awac=loss_awac.item(),\n ),\n values=get_tensor_stats(V, terminal_mask, n_nonterminal),\n qvalues={str(ix): get_tensor_stats(Q[ix], terminal_mask, n_nonterminal) for ix in range(len(Q))},\n awac_weight=get_tensor_stats(awac_weight, terminal_mask, n_nonterminal),\n )\n\n return loss, flatten_dict(stats)\n\n\nclass ILQLHeads(nn.Module):\n def __init__(\n self,\n hidden_size: int,\n vocab_size: int,\n two_qs: bool,\n alpha: float,\n dtype: type,\n ):", "metadata": {"task_id": "CarperAI--trlx/36", "ground_truth": " super().__init__()\n\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.two_qs = two_qs\n self.alpha = alpha\n self.v_head = make_head(self.hidden_size, 1, dtype)\n\n n_qs = 2 if self.two_qs else 1\n self.q_heads = nn.ModuleList(make_head(self.hidden_size, self.vocab_size, dtype) for _ in range(n_qs))\n self.target_q_heads = nn.ModuleList(deepcopy(q_head) for q_head in self.q_heads)\n\n for target_q_head in self.target_q_heads:\n target_q_head.requires_grad_(False)\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_ilql.py"], "context_start_lineno": 0, "lineno": 139, "function_name": "__init__"}, "groundtruth": " super().__init__()\n\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.two_qs = two_qs\n self.alpha = alpha\n self.v_head = make_head(self.hidden_size, 1, dtype)\n\n n_qs = 2 if self.two_qs else 1\n self.q_heads = nn.ModuleList(make_head(self.hidden_size, self.vocab_size, dtype) for _ in range(n_qs))\n self.target_q_heads = nn.ModuleList(deepcopy(q_head) for q_head in self.q_heads)\n\n for target_q_head in self.target_q_heads:\n target_q_head.requires_grad_(False)\n"} +{"prompt": "import gc\nimport os\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom functools import reduce\nfrom itertools import chain\n\nimport deepspeed # type: ignore\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport transformers\nfrom torch import nn\nfrom torchtyping import TensorType\n\nfrom trlx.data.ilql_types import ILQLBatch\nfrom trlx.data.method_configs import MethodConfig, register_method\nfrom trlx.models.modeling_base import PreTrainedModelWrapper\nfrom trlx.utils.modeling import (\n flatten_dict,\n get_tensor_stats,\n hf_get_hidden_size,\n hf_get_lm_head,\n 
make_head,\n)\n\n\ndef topk_mask(xs: torch.FloatTensor, k: int):\n if k > xs.shape[-1]:\n return xs\n mintop = torch.topk(xs, k)[0][:, -1].unsqueeze(-1)\n return torch.where(xs < mintop, -np.inf * torch.ones_like(xs, dtype=xs.dtype), xs)\n\n\ndef batched_index_select(\n x: TensorType[\"batch\", \"seq_len\", \"hidden\"],\n idxs: TensorType[\"batch\", \"index_len\"],\n dim: int,\n) -> TensorType[\"batch\", \"index_len\", \"hidden\"]:\n \"\"\"\n Gather vectors at idxs along dim from x\n \"\"\"\n idxs = idxs.unsqueeze(-1).expand(idxs.shape[0], idxs.shape[1], x.shape[-1])\n return x.gather(dim=dim, index=idxs)\n\n\n@dataclass\n@register_method\nclass ILQLConfig(MethodConfig):\n tau: float\n gamma: float\n cql_scale: float\n awac_scale: float\n alpha: float\n beta: float\n steps_for_target_q_sync: float\n two_qs: bool\n gen_kwargs: dict\n\n def loss(self, outputs, labels: ILQLBatch):\n logits, (qs, target_qs, vs) = outputs\n terminal_mask = labels.dones[:, :-1]\n n_nonterminal = max(1, terminal_mask.sum())\n\n actions = labels.input_ids[:, 1:].gather(dim=1, index=labels.actions_ixs).unsqueeze(-1)\n nactions = actions.shape[1]\n bsize, _, dsize = logits.shape\n\n Q = [q.gather(-1, actions).squeeze(-1) for q in qs]\n targetQs = [q.gather(-1, actions).squeeze(-1).detach() for q in target_qs]\n targetQ = reduce(torch.minimum, targetQs)\n\n # values of current states\n V = vs[:, :-1].squeeze()\n # values of next states\n Vnext = vs[:, 1:].squeeze() * labels.dones[:, 1:]\n # target to fit Q\n Q_ = labels.rewards + self.gamma * Vnext.detach()\n\n loss_qs = [((Qi - Q_) * terminal_mask).pow(2).sum() / n_nonterminal for Qi in Q]\n loss_q = sum(loss_qs)\n\n targetQ = targetQ.detach()\n\n loss_v = (\n (\n (targetQ >= V).int() * self.tau * (targetQ - V).pow(2)\n + (targetQ < V).int() * (1 - self.tau) * (targetQ - V).pow(2)\n )\n * terminal_mask\n ).sum() / n_nonterminal\n\n def cql_loss(q):\n loss = F.cross_entropy(q.reshape(-1, dsize), actions.reshape(-1), reduction=\"none\")\n loss = loss.reshape(bsize, nactions) * terminal_mask\n loss = loss.sum() / n_nonterminal\n return loss\n\n loss_cql = sum(cql_loss(q) for q in qs)\n\n # select logits from continuations\n action_logits = batched_index_select(logits, labels.actions_ixs, dim=1)\n cross_entropy = F.cross_entropy(\n action_logits.reshape(-1, dsize),\n actions.reshape(-1),\n reduction=\"none\",\n ).reshape(bsize, nactions)\n\n with torch.no_grad():\n awac_weight = torch.exp(self.beta * (targetQ - V))\n\n loss_awac = torch.sum(cross_entropy * awac_weight * terminal_mask) / n_nonterminal\n loss = loss_q + loss_v + self.cql_scale * loss_cql + self.awac_scale * loss_awac\n\n stats = dict(\n losses=dict(\n loss=loss.item(),\n loss_q=loss_q.item(),\n loss_v=loss_v.item(),\n loss_cql=loss_cql.item(),\n loss_awac=loss_awac.item(),\n ),\n values=get_tensor_stats(V, terminal_mask, n_nonterminal),\n qvalues={str(ix): get_tensor_stats(Q[ix], terminal_mask, n_nonterminal) for ix in range(len(Q))},\n awac_weight=get_tensor_stats(awac_weight, terminal_mask, n_nonterminal),\n )\n\n return loss, flatten_dict(stats)\n\n\nclass ILQLHeads(nn.Module):\n def __init__(\n self,\n hidden_size: int,\n vocab_size: int,\n two_qs: bool,\n alpha: float,\n dtype: type,\n ):\n super().__init__()\n\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.two_qs = two_qs\n self.alpha = alpha\n self.v_head = make_head(self.hidden_size, 1, dtype)\n\n n_qs = 2 if self.two_qs else 1\n self.q_heads = nn.ModuleList(make_head(self.hidden_size, self.vocab_size, dtype) for _ in 
range(n_qs))\n self.target_q_heads = nn.ModuleList(deepcopy(q_head) for q_head in self.q_heads)\n\n for target_q_head in self.target_q_heads:\n target_q_head.requires_grad_(False)\n\n def forward(\n self,\n hs: torch.Tensor,\n states_ixs: torch.Tensor = None,\n actions_ixs: torch.Tensor = None,\n **kwargs,\n ):", "metadata": {"task_id": "CarperAI--trlx/37", "ground_truth": " if states_ixs is not None:\n states_hs = batched_index_select(hs, states_ixs, 1)\n actions_hs = batched_index_select(hs, actions_ixs, 1)\n else:\n states_hs = actions_hs = hs\n\n qs = tuple(q_head(actions_hs) for q_head in self.q_heads)\n target_qs = tuple(q_head(actions_hs) for q_head in self.target_q_heads)\n vs = self.v_head(states_hs)\n\n return qs, target_qs, vs\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_ilql.py"], "context_start_lineno": 0, "lineno": 161, "function_name": "forward"}, "groundtruth": " if states_ixs is not None:\n states_hs = batched_index_select(hs, states_ixs, 1)\n actions_hs = batched_index_select(hs, actions_ixs, 1)\n else:\n states_hs = actions_hs = hs\n\n qs = tuple(q_head(actions_hs) for q_head in self.q_heads)\n target_qs = tuple(q_head(actions_hs) for q_head in self.target_q_heads)\n vs = self.v_head(states_hs)\n\n return qs, target_qs, vs\n"} +{"prompt": "import gc\nimport os\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom functools import reduce\nfrom itertools import chain\n\nimport deepspeed # type: ignore\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport transformers\nfrom torch import nn\nfrom torchtyping import TensorType\n\nfrom trlx.data.ilql_types import ILQLBatch\nfrom trlx.data.method_configs import MethodConfig, register_method\nfrom trlx.models.modeling_base import PreTrainedModelWrapper\nfrom trlx.utils.modeling import (\n flatten_dict,\n get_tensor_stats,\n hf_get_hidden_size,\n hf_get_lm_head,\n make_head,\n)\n\n\ndef topk_mask(xs: torch.FloatTensor, k: int):\n if k > xs.shape[-1]:\n return xs\n mintop = torch.topk(xs, k)[0][:, -1].unsqueeze(-1)\n return torch.where(xs < mintop, -np.inf * torch.ones_like(xs, dtype=xs.dtype), xs)\n\n\ndef batched_index_select(\n x: TensorType[\"batch\", \"seq_len\", \"hidden\"],\n idxs: TensorType[\"batch\", \"index_len\"],\n dim: int,\n) -> TensorType[\"batch\", \"index_len\", \"hidden\"]:\n \"\"\"\n Gather vectors at idxs along dim from x\n \"\"\"\n idxs = idxs.unsqueeze(-1).expand(idxs.shape[0], idxs.shape[1], x.shape[-1])\n return x.gather(dim=dim, index=idxs)\n\n\n@dataclass\n@register_method\nclass ILQLConfig(MethodConfig):\n tau: float\n gamma: float\n cql_scale: float\n awac_scale: float\n alpha: float\n beta: float\n steps_for_target_q_sync: float\n two_qs: bool\n gen_kwargs: dict\n\n def loss(self, outputs, labels: ILQLBatch):\n logits, (qs, target_qs, vs) = outputs\n terminal_mask = labels.dones[:, :-1]\n n_nonterminal = max(1, terminal_mask.sum())\n\n actions = labels.input_ids[:, 1:].gather(dim=1, index=labels.actions_ixs).unsqueeze(-1)\n nactions = actions.shape[1]\n bsize, _, dsize = logits.shape\n\n Q = [q.gather(-1, actions).squeeze(-1) for q in qs]\n targetQs = [q.gather(-1, actions).squeeze(-1).detach() for q in target_qs]\n targetQ = reduce(torch.minimum, targetQs)\n\n # values of current states\n V = vs[:, :-1].squeeze()\n # values of next states\n Vnext = vs[:, 1:].squeeze() * labels.dones[:, 1:]\n # target to fit Q\n Q_ = labels.rewards + self.gamma * Vnext.detach()\n\n loss_qs = [((Qi - Q_) * terminal_mask).pow(2).sum() / n_nonterminal for Qi in Q]\n 
loss_q = sum(loss_qs)\n\n targetQ = targetQ.detach()\n\n loss_v = (\n (\n (targetQ >= V).int() * self.tau * (targetQ - V).pow(2)\n + (targetQ < V).int() * (1 - self.tau) * (targetQ - V).pow(2)\n )\n * terminal_mask\n ).sum() / n_nonterminal\n\n def cql_loss(q):\n loss = F.cross_entropy(q.reshape(-1, dsize), actions.reshape(-1), reduction=\"none\")\n loss = loss.reshape(bsize, nactions) * terminal_mask\n loss = loss.sum() / n_nonterminal\n return loss\n\n loss_cql = sum(cql_loss(q) for q in qs)\n\n # select logits from continuations\n action_logits = batched_index_select(logits, labels.actions_ixs, dim=1)\n cross_entropy = F.cross_entropy(\n action_logits.reshape(-1, dsize),\n actions.reshape(-1),\n reduction=\"none\",\n ).reshape(bsize, nactions)\n\n with torch.no_grad():\n awac_weight = torch.exp(self.beta * (targetQ - V))\n\n loss_awac = torch.sum(cross_entropy * awac_weight * terminal_mask) / n_nonterminal\n loss = loss_q + loss_v + self.cql_scale * loss_cql + self.awac_scale * loss_awac\n\n stats = dict(\n losses=dict(\n loss=loss.item(),\n loss_q=loss_q.item(),\n loss_v=loss_v.item(),\n loss_cql=loss_cql.item(),\n loss_awac=loss_awac.item(),\n ),\n values=get_tensor_stats(V, terminal_mask, n_nonterminal),\n qvalues={str(ix): get_tensor_stats(Q[ix], terminal_mask, n_nonterminal) for ix in range(len(Q))},\n awac_weight=get_tensor_stats(awac_weight, terminal_mask, n_nonterminal),\n )\n\n return loss, flatten_dict(stats)\n\n\nclass ILQLHeads(nn.Module):\n def __init__(\n self,\n hidden_size: int,\n vocab_size: int,\n two_qs: bool,\n alpha: float,\n dtype: type,\n ):\n super().__init__()\n\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.two_qs = two_qs\n self.alpha = alpha\n self.v_head = make_head(self.hidden_size, 1, dtype)\n\n n_qs = 2 if self.two_qs else 1\n self.q_heads = nn.ModuleList(make_head(self.hidden_size, self.vocab_size, dtype) for _ in range(n_qs))\n self.target_q_heads = nn.ModuleList(deepcopy(q_head) for q_head in self.q_heads)\n\n for target_q_head in self.target_q_heads:\n target_q_head.requires_grad_(False)\n\n def forward(\n self,\n hs: torch.Tensor,\n states_ixs: torch.Tensor = None,\n actions_ixs: torch.Tensor = None,\n **kwargs,\n ):\n if states_ixs is not None:\n states_hs = batched_index_select(hs, states_ixs, 1)\n actions_hs = batched_index_select(hs, actions_ixs, 1)\n else:\n states_hs = actions_hs = hs\n\n qs = tuple(q_head(actions_hs) for q_head in self.q_heads)\n target_qs = tuple(q_head(actions_hs) for q_head in self.target_q_heads)\n vs = self.v_head(states_hs)\n\n return qs, target_qs, vs\n\n def _sync_target_q_heads(self, alpha):\n for target_q_head, q_head in zip(self.target_q_heads, self.q_heads):\n for target_param, copy_param in zip(target_q_head.parameters(), q_head.parameters()):\n target_param.data.copy_((alpha * copy_param.data) + (1.0 - alpha) * target_param.data)\n\n def sync_target_q_heads(self):\n if os.environ.get(\"DEEPSPEED_ZERO_STAGE\", \"0\") == \"3\":\n params = chain(\n chain(q_head.parameters() for q_head in self.q_heads),\n chain(q_head.parameters() for q_head in self.target_q_heads),\n )\n\n with deepspeed.zero.GatheredParameters(list(params), modifier_rank=0):\n if deepspeed.comm.get_rank() == 0:\n self._sync_target_q_heads(self.alpha)\n else:\n self._sync_target_q_heads(self.alpha)\n\n\nclass AutoModelForCausalLMWithILQLHeads(PreTrainedModelWrapper):\n \"\"\"An `AutoModel` class wrapper for `transformers` causal models wtih a language\n modeling head and ILQL heads.\n\n References:\n [1] Snell et al., 
\"Offline RL for Natural Language Generation with Implicit Language Q Learning\",\n https://arxiv.org/abs/2206.11871, 2022\n \"\"\"\n\n _auto_model_parent_class = transformers.AutoModelForCausalLM\n _supported_modules = [\"ilql_heads\"]\n _supported_args = [\"two_qs\", \"alpha\"]\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n *,\n two_qs: bool = True,\n alpha: float = 0.99,\n ):", "metadata": {"task_id": "CarperAI--trlx/38", "ground_truth": " super().__init__(base_model)\n hidden_size = hf_get_hidden_size(self.base_model.config)\n vocab_size = self.base_model.config.vocab_size\n dtype = next(hf_get_lm_head(self.base_model).parameters()).dtype\n self.two_qs = two_qs\n self.alpha = alpha\n self.ilql_heads = ILQLHeads(hidden_size, vocab_size, self.two_qs, self.alpha, dtype=dtype)\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_ilql.py"], "context_start_lineno": 0, "lineno": 212, "function_name": "__init__"}, "groundtruth": " super().__init__(base_model)\n hidden_size = hf_get_hidden_size(self.base_model.config)\n vocab_size = self.base_model.config.vocab_size\n dtype = next(hf_get_lm_head(self.base_model).parameters()).dtype\n self.two_qs = two_qs\n self.alpha = alpha\n self.ilql_heads = ILQLHeads(hidden_size, vocab_size, self.two_qs, self.alpha, dtype=dtype)\n"} +{"prompt": "import gc\nimport os\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom functools import reduce\nfrom itertools import chain\n\nimport deepspeed # type: ignore\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport transformers\nfrom torch import nn\nfrom torchtyping import TensorType\n\nfrom trlx.data.ilql_types import ILQLBatch\nfrom trlx.data.method_configs import MethodConfig, register_method\nfrom trlx.models.modeling_base import PreTrainedModelWrapper\nfrom trlx.utils.modeling import (\n flatten_dict,\n get_tensor_stats,\n hf_get_hidden_size,\n hf_get_lm_head,\n make_head,\n)\n\n\ndef topk_mask(xs: torch.FloatTensor, k: int):\n if k > xs.shape[-1]:\n return xs\n mintop = torch.topk(xs, k)[0][:, -1].unsqueeze(-1)\n return torch.where(xs < mintop, -np.inf * torch.ones_like(xs, dtype=xs.dtype), xs)\n\n\ndef batched_index_select(\n x: TensorType[\"batch\", \"seq_len\", \"hidden\"],\n idxs: TensorType[\"batch\", \"index_len\"],\n dim: int,\n) -> TensorType[\"batch\", \"index_len\", \"hidden\"]:\n \"\"\"\n Gather vectors at idxs along dim from x\n \"\"\"\n idxs = idxs.unsqueeze(-1).expand(idxs.shape[0], idxs.shape[1], x.shape[-1])\n return x.gather(dim=dim, index=idxs)\n\n\n@dataclass\n@register_method\nclass ILQLConfig(MethodConfig):\n tau: float\n gamma: float\n cql_scale: float\n awac_scale: float\n alpha: float\n beta: float\n steps_for_target_q_sync: float\n two_qs: bool\n gen_kwargs: dict\n\n def loss(self, outputs, labels: ILQLBatch):\n logits, (qs, target_qs, vs) = outputs\n terminal_mask = labels.dones[:, :-1]\n n_nonterminal = max(1, terminal_mask.sum())\n\n actions = labels.input_ids[:, 1:].gather(dim=1, index=labels.actions_ixs).unsqueeze(-1)\n nactions = actions.shape[1]\n bsize, _, dsize = logits.shape\n\n Q = [q.gather(-1, actions).squeeze(-1) for q in qs]\n targetQs = [q.gather(-1, actions).squeeze(-1).detach() for q in target_qs]\n targetQ = reduce(torch.minimum, targetQs)\n\n # values of current states\n V = vs[:, :-1].squeeze()\n # values of next states\n Vnext = vs[:, 1:].squeeze() * labels.dones[:, 1:]\n # target to fit Q\n Q_ = labels.rewards + self.gamma * Vnext.detach()\n\n loss_qs = [((Qi - Q_) * 
terminal_mask).pow(2).sum() / n_nonterminal for Qi in Q]\n loss_q = sum(loss_qs)\n\n targetQ = targetQ.detach()\n\n loss_v = (\n (\n (targetQ >= V).int() * self.tau * (targetQ - V).pow(2)\n + (targetQ < V).int() * (1 - self.tau) * (targetQ - V).pow(2)\n )\n * terminal_mask\n ).sum() / n_nonterminal\n\n def cql_loss(q):\n loss = F.cross_entropy(q.reshape(-1, dsize), actions.reshape(-1), reduction=\"none\")\n loss = loss.reshape(bsize, nactions) * terminal_mask\n loss = loss.sum() / n_nonterminal\n return loss\n\n loss_cql = sum(cql_loss(q) for q in qs)\n\n # select logits from continuations\n action_logits = batched_index_select(logits, labels.actions_ixs, dim=1)\n cross_entropy = F.cross_entropy(\n action_logits.reshape(-1, dsize),\n actions.reshape(-1),\n reduction=\"none\",\n ).reshape(bsize, nactions)\n\n with torch.no_grad():\n awac_weight = torch.exp(self.beta * (targetQ - V))\n\n loss_awac = torch.sum(cross_entropy * awac_weight * terminal_mask) / n_nonterminal\n loss = loss_q + loss_v + self.cql_scale * loss_cql + self.awac_scale * loss_awac\n\n stats = dict(\n losses=dict(\n loss=loss.item(),\n loss_q=loss_q.item(),\n loss_v=loss_v.item(),\n loss_cql=loss_cql.item(),\n loss_awac=loss_awac.item(),\n ),\n values=get_tensor_stats(V, terminal_mask, n_nonterminal),\n qvalues={str(ix): get_tensor_stats(Q[ix], terminal_mask, n_nonterminal) for ix in range(len(Q))},\n awac_weight=get_tensor_stats(awac_weight, terminal_mask, n_nonterminal),\n )\n\n return loss, flatten_dict(stats)\n\n\nclass ILQLHeads(nn.Module):\n def __init__(\n self,\n hidden_size: int,\n vocab_size: int,\n two_qs: bool,\n alpha: float,\n dtype: type,\n ):\n super().__init__()\n\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.two_qs = two_qs\n self.alpha = alpha\n self.v_head = make_head(self.hidden_size, 1, dtype)\n\n n_qs = 2 if self.two_qs else 1\n self.q_heads = nn.ModuleList(make_head(self.hidden_size, self.vocab_size, dtype) for _ in range(n_qs))\n self.target_q_heads = nn.ModuleList(deepcopy(q_head) for q_head in self.q_heads)\n\n for target_q_head in self.target_q_heads:\n target_q_head.requires_grad_(False)\n\n def forward(\n self,\n hs: torch.Tensor,\n states_ixs: torch.Tensor = None,\n actions_ixs: torch.Tensor = None,\n **kwargs,\n ):\n if states_ixs is not None:\n states_hs = batched_index_select(hs, states_ixs, 1)\n actions_hs = batched_index_select(hs, actions_ixs, 1)\n else:\n states_hs = actions_hs = hs\n\n qs = tuple(q_head(actions_hs) for q_head in self.q_heads)\n target_qs = tuple(q_head(actions_hs) for q_head in self.target_q_heads)\n vs = self.v_head(states_hs)\n\n return qs, target_qs, vs\n\n def _sync_target_q_heads(self, alpha):\n for target_q_head, q_head in zip(self.target_q_heads, self.q_heads):\n for target_param, copy_param in zip(target_q_head.parameters(), q_head.parameters()):\n target_param.data.copy_((alpha * copy_param.data) + (1.0 - alpha) * target_param.data)\n\n def sync_target_q_heads(self):\n if os.environ.get(\"DEEPSPEED_ZERO_STAGE\", \"0\") == \"3\":\n params = chain(\n chain(q_head.parameters() for q_head in self.q_heads),\n chain(q_head.parameters() for q_head in self.target_q_heads),\n )\n\n with deepspeed.zero.GatheredParameters(list(params), modifier_rank=0):\n if deepspeed.comm.get_rank() == 0:\n self._sync_target_q_heads(self.alpha)\n else:\n self._sync_target_q_heads(self.alpha)\n\n\nclass AutoModelForCausalLMWithILQLHeads(PreTrainedModelWrapper):\n \"\"\"An `AutoModel` class wrapper for `transformers` causal models wtih a language\n modeling head 
and ILQL heads.\n\n References:\n [1] Snell et al., \"Offline RL for Natural Language Generation with Implicit Language Q Learning\",\n https://arxiv.org/abs/2206.11871, 2022\n \"\"\"\n\n _auto_model_parent_class = transformers.AutoModelForCausalLM\n _supported_modules = [\"ilql_heads\"]\n _supported_args = [\"two_qs\", \"alpha\"]\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n *,\n two_qs: bool = True,\n alpha: float = 0.99,\n ):\n super().__init__(base_model)\n hidden_size = hf_get_hidden_size(self.base_model.config)\n vocab_size = self.base_model.config.vocab_size\n dtype = next(hf_get_lm_head(self.base_model).parameters()).dtype\n self.two_qs = two_qs\n self.alpha = alpha\n self.ilql_heads = ILQLHeads(hidden_size, vocab_size, self.two_qs, self.alpha, dtype=dtype)\n\n def forward(\n self,\n input_ids,\n attention_mask=None,\n position_ids=None,\n past_key_values=None,\n actions_ixs=None,\n states_ixs=None,\n ):", "metadata": {"task_id": "CarperAI--trlx/39", "ground_truth": " forward_kwargs = self.get_compatible_forward_kwargs(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n )\n forward_kwargs[\"output_hidden_states\"] = True\n\n outputs = self.base_model(**forward_kwargs)\n qs, target_qs, vs = self.ilql_heads(outputs.hidden_states[-1], states_ixs=states_ixs, actions_ixs=actions_ixs)\n\n return outputs.logits, qs, target_qs, vs, outputs.past_key_values\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_ilql.py"], "context_start_lineno": 0, "lineno": 229, "function_name": "forward"}, "groundtruth": " forward_kwargs = self.get_compatible_forward_kwargs(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n )\n forward_kwargs[\"output_hidden_states\"] = True\n\n outputs = self.base_model(**forward_kwargs)\n qs, target_qs, vs = self.ilql_heads(outputs.hidden_states[-1], states_ixs=states_ixs, actions_ixs=actions_ixs)\n\n return outputs.logits, qs, target_qs, vs, outputs.past_key_values\n"} +{"prompt": "import gc\nimport os\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom functools import reduce\nfrom itertools import chain\n\nimport deepspeed # type: ignore\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport transformers\nfrom torch import nn\nfrom torchtyping import TensorType\n\nfrom trlx.data.ilql_types import ILQLBatch\nfrom trlx.data.method_configs import MethodConfig, register_method\nfrom trlx.models.modeling_base import PreTrainedModelWrapper\nfrom trlx.utils.modeling import (\n flatten_dict,\n get_tensor_stats,\n hf_get_hidden_size,\n hf_get_lm_head,\n make_head,\n)\n\n\ndef topk_mask(xs: torch.FloatTensor, k: int):\n if k > xs.shape[-1]:\n return xs\n mintop = torch.topk(xs, k)[0][:, -1].unsqueeze(-1)\n return torch.where(xs < mintop, -np.inf * torch.ones_like(xs, dtype=xs.dtype), xs)\n\n\ndef batched_index_select(\n x: TensorType[\"batch\", \"seq_len\", \"hidden\"],\n idxs: TensorType[\"batch\", \"index_len\"],\n dim: int,\n) -> TensorType[\"batch\", \"index_len\", \"hidden\"]:\n \"\"\"\n Gather vectors at idxs along dim from x\n \"\"\"\n idxs = idxs.unsqueeze(-1).expand(idxs.shape[0], idxs.shape[1], x.shape[-1])\n return x.gather(dim=dim, index=idxs)\n\n\n@dataclass\n@register_method\nclass ILQLConfig(MethodConfig):\n tau: float\n gamma: float\n cql_scale: float\n awac_scale: float\n alpha: float\n beta: float\n steps_for_target_q_sync: float\n two_qs: bool\n 
gen_kwargs: dict\n\n def loss(self, outputs, labels: ILQLBatch):\n logits, (qs, target_qs, vs) = outputs\n terminal_mask = labels.dones[:, :-1]\n n_nonterminal = max(1, terminal_mask.sum())\n\n actions = labels.input_ids[:, 1:].gather(dim=1, index=labels.actions_ixs).unsqueeze(-1)\n nactions = actions.shape[1]\n bsize, _, dsize = logits.shape\n\n Q = [q.gather(-1, actions).squeeze(-1) for q in qs]\n targetQs = [q.gather(-1, actions).squeeze(-1).detach() for q in target_qs]\n targetQ = reduce(torch.minimum, targetQs)\n\n # values of current states\n V = vs[:, :-1].squeeze()\n # values of next states\n Vnext = vs[:, 1:].squeeze() * labels.dones[:, 1:]\n # target to fit Q\n Q_ = labels.rewards + self.gamma * Vnext.detach()\n\n loss_qs = [((Qi - Q_) * terminal_mask).pow(2).sum() / n_nonterminal for Qi in Q]\n loss_q = sum(loss_qs)\n\n targetQ = targetQ.detach()\n\n loss_v = (\n (\n (targetQ >= V).int() * self.tau * (targetQ - V).pow(2)\n + (targetQ < V).int() * (1 - self.tau) * (targetQ - V).pow(2)\n )\n * terminal_mask\n ).sum() / n_nonterminal\n\n def cql_loss(q):\n loss = F.cross_entropy(q.reshape(-1, dsize), actions.reshape(-1), reduction=\"none\")\n loss = loss.reshape(bsize, nactions) * terminal_mask\n loss = loss.sum() / n_nonterminal\n return loss\n\n loss_cql = sum(cql_loss(q) for q in qs)\n\n # select logits from continuations\n action_logits = batched_index_select(logits, labels.actions_ixs, dim=1)\n cross_entropy = F.cross_entropy(\n action_logits.reshape(-1, dsize),\n actions.reshape(-1),\n reduction=\"none\",\n ).reshape(bsize, nactions)\n\n with torch.no_grad():\n awac_weight = torch.exp(self.beta * (targetQ - V))\n\n loss_awac = torch.sum(cross_entropy * awac_weight * terminal_mask) / n_nonterminal\n loss = loss_q + loss_v + self.cql_scale * loss_cql + self.awac_scale * loss_awac\n\n stats = dict(\n losses=dict(\n loss=loss.item(),\n loss_q=loss_q.item(),\n loss_v=loss_v.item(),\n loss_cql=loss_cql.item(),\n loss_awac=loss_awac.item(),\n ),\n values=get_tensor_stats(V, terminal_mask, n_nonterminal),\n qvalues={str(ix): get_tensor_stats(Q[ix], terminal_mask, n_nonterminal) for ix in range(len(Q))},\n awac_weight=get_tensor_stats(awac_weight, terminal_mask, n_nonterminal),\n )\n\n return loss, flatten_dict(stats)\n\n\nclass ILQLHeads(nn.Module):\n def __init__(\n self,\n hidden_size: int,\n vocab_size: int,\n two_qs: bool,\n alpha: float,\n dtype: type,\n ):\n super().__init__()\n\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.two_qs = two_qs\n self.alpha = alpha\n self.v_head = make_head(self.hidden_size, 1, dtype)\n\n n_qs = 2 if self.two_qs else 1\n self.q_heads = nn.ModuleList(make_head(self.hidden_size, self.vocab_size, dtype) for _ in range(n_qs))\n self.target_q_heads = nn.ModuleList(deepcopy(q_head) for q_head in self.q_heads)\n\n for target_q_head in self.target_q_heads:\n target_q_head.requires_grad_(False)\n\n def forward(\n self,\n hs: torch.Tensor,\n states_ixs: torch.Tensor = None,\n actions_ixs: torch.Tensor = None,\n **kwargs,\n ):\n if states_ixs is not None:\n states_hs = batched_index_select(hs, states_ixs, 1)\n actions_hs = batched_index_select(hs, actions_ixs, 1)\n else:\n states_hs = actions_hs = hs\n\n qs = tuple(q_head(actions_hs) for q_head in self.q_heads)\n target_qs = tuple(q_head(actions_hs) for q_head in self.target_q_heads)\n vs = self.v_head(states_hs)\n\n return qs, target_qs, vs\n\n def _sync_target_q_heads(self, alpha):\n for target_q_head, q_head in zip(self.target_q_heads, self.q_heads):\n for target_param, copy_param 
in zip(target_q_head.parameters(), q_head.parameters()):\n target_param.data.copy_((alpha * copy_param.data) + (1.0 - alpha) * target_param.data)\n\n def sync_target_q_heads(self):\n if os.environ.get(\"DEEPSPEED_ZERO_STAGE\", \"0\") == \"3\":\n params = chain(\n chain(q_head.parameters() for q_head in self.q_heads),\n chain(q_head.parameters() for q_head in self.target_q_heads),\n )\n\n with deepspeed.zero.GatheredParameters(list(params), modifier_rank=0):\n if deepspeed.comm.get_rank() == 0:\n self._sync_target_q_heads(self.alpha)\n else:\n self._sync_target_q_heads(self.alpha)\n\n\nclass AutoModelForCausalLMWithILQLHeads(PreTrainedModelWrapper):\n \"\"\"An `AutoModel` class wrapper for `transformers` causal models wtih a language\n modeling head and ILQL heads.\n\n References:\n [1] Snell et al., \"Offline RL for Natural Language Generation with Implicit Language Q Learning\",\n https://arxiv.org/abs/2206.11871, 2022\n \"\"\"\n\n _auto_model_parent_class = transformers.AutoModelForCausalLM\n _supported_modules = [\"ilql_heads\"]\n _supported_args = [\"two_qs\", \"alpha\"]\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n *,\n two_qs: bool = True,\n alpha: float = 0.99,\n ):\n super().__init__(base_model)\n hidden_size = hf_get_hidden_size(self.base_model.config)\n vocab_size = self.base_model.config.vocab_size\n dtype = next(hf_get_lm_head(self.base_model).parameters()).dtype\n self.two_qs = two_qs\n self.alpha = alpha\n self.ilql_heads = ILQLHeads(hidden_size, vocab_size, self.two_qs, self.alpha, dtype=dtype)\n\n def forward(\n self,\n input_ids,\n attention_mask=None,\n position_ids=None,\n past_key_values=None,\n actions_ixs=None,\n states_ixs=None,\n ):\n forward_kwargs = self.get_compatible_forward_kwargs(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n )\n forward_kwargs[\"output_hidden_states\"] = True\n\n outputs = self.base_model(**forward_kwargs)\n qs, target_qs, vs = self.ilql_heads(outputs.hidden_states[-1], states_ixs=states_ixs, actions_ixs=actions_ixs)\n\n return outputs.logits, qs, target_qs, vs, outputs.past_key_values\n\n def generate(\n self,\n input_ids,\n attention_mask=None,\n position_ids=None,\n past_key_values=None,\n beta=1,\n max_new_tokens=32,\n max_length=1024,\n temperature=1,\n top_k=20,\n logit_mask=None,\n pad_token_id=None,\n eos_token_id=None,\n ):\n \"\"\"\n Generates samples akin to hf's `.generate` but with custom logp prepossessing:\n changing token probabilities as to how advantageous they would be\n according to value functions estimations.\n \"\"\"\n pad_token_id = pad_token_id if pad_token_id is not None else self.base_model.config.pad_token_id\n eos_token_id = eos_token_id if eos_token_id is not None else self.base_model.config.eos_token_id\n\n if attention_mask is None:\n attention_mask = input_ids.not_equal(pad_token_id)\n\n if position_ids is None:\n position_ids = attention_mask.cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask.eq(0), 0)\n\n samples = input_ids.clone()\n max_new_tokens = min(max_new_tokens, max_length - input_ids.shape[1])\n\n finished = torch.zeros(input_ids.shape[0], 1, dtype=torch.long, device=input_ids.device)\n for _ in range(max_new_tokens):\n out = self.forward(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n )\n\n logits, _, target_qs, vs, past_key_values = out\n if self.two_qs:\n qs = torch.minimum(target_qs[0][:, -1, :], target_qs[1][:, 
-1, :])\n else:\n qs = target_qs[:, -1, :]\n\n logits = logits[:, -1, :]\n vs = vs[:, -1, :]\n\n if logit_mask is not None:\n mask = logit_mask[input_ids[:, -1].squeeze().to(logit_mask.device)]\n logits[torch.where(mask)] = -np.inf\n\n adv = qs - vs\n pi_beta = F.log_softmax(logits, -1)\n pi_top_k = topk_mask(pi_beta + beta * adv, top_k)\n pi = F.softmax(pi_top_k / temperature, -1)\n\n input_ids = torch.multinomial(pi, num_samples=1)\n input_ids = (1 - finished) * input_ids + finished * eos_token_id\n finished = (input_ids == eos_token_id).long()\n\n samples = torch.hstack((samples, input_ids))\n attention_mask = torch.hstack((attention_mask, (input_ids != eos_token_id).long()))\n position_ids = (position_ids[:, -1] + 1).view(-1, 1)\n\n if torch.all(finished):\n break\n\n return samples\n\n def sync_target_q_heads(self):\n self.ilql_heads.sync_target_q_heads()\n\n def state_dict(self, *args, **kwargs):\n \"\"\"\n Returns the state dictionary of the model. We add the state dictionary of the ilql heads\n to the state dictionary of the wrapped model by prepending the key with `ilql_heads.`.\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/40", "ground_truth": " base_model_state_dict = self.base_model.state_dict(*args, **kwargs)\n ilql_heads_state_dict = self.ilql_heads.state_dict(*args, **kwargs)\n for k, v in ilql_heads_state_dict.items():\n base_model_state_dict[f\"ilql_heads.{k}\"] = v\n return base_model_state_dict\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_ilql.py"], "context_start_lineno": 0, "lineno": 323, "function_name": "state_dict"}, "groundtruth": " base_model_state_dict = self.base_model.state_dict(*args, **kwargs)\n ilql_heads_state_dict = self.ilql_heads.state_dict(*args, **kwargs)\n for k, v in ilql_heads_state_dict.items():\n base_model_state_dict[f\"ilql_heads.{k}\"] = v\n return base_model_state_dict\n"} +{"prompt": "import gc\nimport os\nfrom copy import deepcopy\nfrom dataclasses import dataclass\nfrom functools import reduce\nfrom itertools import chain\n\nimport deepspeed # type: ignore\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport transformers\nfrom torch import nn\nfrom torchtyping import TensorType\n\nfrom trlx.data.ilql_types import ILQLBatch\nfrom trlx.data.method_configs import MethodConfig, register_method\nfrom trlx.models.modeling_base import PreTrainedModelWrapper\nfrom trlx.utils.modeling import (\n flatten_dict,\n get_tensor_stats,\n hf_get_hidden_size,\n hf_get_lm_head,\n make_head,\n)\n\n\ndef topk_mask(xs: torch.FloatTensor, k: int):\n if k > xs.shape[-1]:\n return xs\n mintop = torch.topk(xs, k)[0][:, -1].unsqueeze(-1)\n return torch.where(xs < mintop, -np.inf * torch.ones_like(xs, dtype=xs.dtype), xs)\n\n\ndef batched_index_select(\n x: TensorType[\"batch\", \"seq_len\", \"hidden\"],\n idxs: TensorType[\"batch\", \"index_len\"],\n dim: int,\n) -> TensorType[\"batch\", \"index_len\", \"hidden\"]:\n \"\"\"\n Gather vectors at idxs along dim from x\n \"\"\"\n idxs = idxs.unsqueeze(-1).expand(idxs.shape[0], idxs.shape[1], x.shape[-1])\n return x.gather(dim=dim, index=idxs)\n\n\n@dataclass\n@register_method\nclass ILQLConfig(MethodConfig):\n tau: float\n gamma: float\n cql_scale: float\n awac_scale: float\n alpha: float\n beta: float\n steps_for_target_q_sync: float\n two_qs: bool\n gen_kwargs: dict\n\n def loss(self, outputs, labels: ILQLBatch):\n logits, (qs, target_qs, vs) = outputs\n terminal_mask = labels.dones[:, :-1]\n n_nonterminal = max(1, terminal_mask.sum())\n\n actions = labels.input_ids[:, 
1:].gather(dim=1, index=labels.actions_ixs).unsqueeze(-1)\n nactions = actions.shape[1]\n bsize, _, dsize = logits.shape\n\n Q = [q.gather(-1, actions).squeeze(-1) for q in qs]\n targetQs = [q.gather(-1, actions).squeeze(-1).detach() for q in target_qs]\n targetQ = reduce(torch.minimum, targetQs)\n\n # values of current states\n V = vs[:, :-1].squeeze()\n # values of next states\n Vnext = vs[:, 1:].squeeze() * labels.dones[:, 1:]\n # target to fit Q\n Q_ = labels.rewards + self.gamma * Vnext.detach()\n\n loss_qs = [((Qi - Q_) * terminal_mask).pow(2).sum() / n_nonterminal for Qi in Q]\n loss_q = sum(loss_qs)\n\n targetQ = targetQ.detach()\n\n loss_v = (\n (\n (targetQ >= V).int() * self.tau * (targetQ - V).pow(2)\n + (targetQ < V).int() * (1 - self.tau) * (targetQ - V).pow(2)\n )\n * terminal_mask\n ).sum() / n_nonterminal\n\n def cql_loss(q):\n loss = F.cross_entropy(q.reshape(-1, dsize), actions.reshape(-1), reduction=\"none\")\n loss = loss.reshape(bsize, nactions) * terminal_mask\n loss = loss.sum() / n_nonterminal\n return loss\n\n loss_cql = sum(cql_loss(q) for q in qs)\n\n # select logits from continuations\n action_logits = batched_index_select(logits, labels.actions_ixs, dim=1)\n cross_entropy = F.cross_entropy(\n action_logits.reshape(-1, dsize),\n actions.reshape(-1),\n reduction=\"none\",\n ).reshape(bsize, nactions)\n\n with torch.no_grad():\n awac_weight = torch.exp(self.beta * (targetQ - V))\n\n loss_awac = torch.sum(cross_entropy * awac_weight * terminal_mask) / n_nonterminal\n loss = loss_q + loss_v + self.cql_scale * loss_cql + self.awac_scale * loss_awac\n\n stats = dict(\n losses=dict(\n loss=loss.item(),\n loss_q=loss_q.item(),\n loss_v=loss_v.item(),\n loss_cql=loss_cql.item(),\n loss_awac=loss_awac.item(),\n ),\n values=get_tensor_stats(V, terminal_mask, n_nonterminal),\n qvalues={str(ix): get_tensor_stats(Q[ix], terminal_mask, n_nonterminal) for ix in range(len(Q))},\n awac_weight=get_tensor_stats(awac_weight, terminal_mask, n_nonterminal),\n )\n\n return loss, flatten_dict(stats)\n\n\nclass ILQLHeads(nn.Module):\n def __init__(\n self,\n hidden_size: int,\n vocab_size: int,\n two_qs: bool,\n alpha: float,\n dtype: type,\n ):\n super().__init__()\n\n self.hidden_size = hidden_size\n self.vocab_size = vocab_size\n self.two_qs = two_qs\n self.alpha = alpha\n self.v_head = make_head(self.hidden_size, 1, dtype)\n\n n_qs = 2 if self.two_qs else 1\n self.q_heads = nn.ModuleList(make_head(self.hidden_size, self.vocab_size, dtype) for _ in range(n_qs))\n self.target_q_heads = nn.ModuleList(deepcopy(q_head) for q_head in self.q_heads)\n\n for target_q_head in self.target_q_heads:\n target_q_head.requires_grad_(False)\n\n def forward(\n self,\n hs: torch.Tensor,\n states_ixs: torch.Tensor = None,\n actions_ixs: torch.Tensor = None,\n **kwargs,\n ):\n if states_ixs is not None:\n states_hs = batched_index_select(hs, states_ixs, 1)\n actions_hs = batched_index_select(hs, actions_ixs, 1)\n else:\n states_hs = actions_hs = hs\n\n qs = tuple(q_head(actions_hs) for q_head in self.q_heads)\n target_qs = tuple(q_head(actions_hs) for q_head in self.target_q_heads)\n vs = self.v_head(states_hs)\n\n return qs, target_qs, vs\n\n def _sync_target_q_heads(self, alpha):\n for target_q_head, q_head in zip(self.target_q_heads, self.q_heads):\n for target_param, copy_param in zip(target_q_head.parameters(), q_head.parameters()):\n target_param.data.copy_((alpha * copy_param.data) + (1.0 - alpha) * target_param.data)\n\n def sync_target_q_heads(self):\n if 
os.environ.get(\"DEEPSPEED_ZERO_STAGE\", \"0\") == \"3\":\n params = chain(\n chain(q_head.parameters() for q_head in self.q_heads),\n chain(q_head.parameters() for q_head in self.target_q_heads),\n )\n\n with deepspeed.zero.GatheredParameters(list(params), modifier_rank=0):\n if deepspeed.comm.get_rank() == 0:\n self._sync_target_q_heads(self.alpha)\n else:\n self._sync_target_q_heads(self.alpha)\n\n\nclass AutoModelForCausalLMWithILQLHeads(PreTrainedModelWrapper):\n \"\"\"An `AutoModel` class wrapper for `transformers` causal models wtih a language\n modeling head and ILQL heads.\n\n References:\n [1] Snell et al., \"Offline RL for Natural Language Generation with Implicit Language Q Learning\",\n https://arxiv.org/abs/2206.11871, 2022\n \"\"\"\n\n _auto_model_parent_class = transformers.AutoModelForCausalLM\n _supported_modules = [\"ilql_heads\"]\n _supported_args = [\"two_qs\", \"alpha\"]\n\n def __init__(\n self,\n base_model: transformers.PreTrainedModel,\n *,\n two_qs: bool = True,\n alpha: float = 0.99,\n ):\n super().__init__(base_model)\n hidden_size = hf_get_hidden_size(self.base_model.config)\n vocab_size = self.base_model.config.vocab_size\n dtype = next(hf_get_lm_head(self.base_model).parameters()).dtype\n self.two_qs = two_qs\n self.alpha = alpha\n self.ilql_heads = ILQLHeads(hidden_size, vocab_size, self.two_qs, self.alpha, dtype=dtype)\n\n def forward(\n self,\n input_ids,\n attention_mask=None,\n position_ids=None,\n past_key_values=None,\n actions_ixs=None,\n states_ixs=None,\n ):\n forward_kwargs = self.get_compatible_forward_kwargs(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n )\n forward_kwargs[\"output_hidden_states\"] = True\n\n outputs = self.base_model(**forward_kwargs)\n qs, target_qs, vs = self.ilql_heads(outputs.hidden_states[-1], states_ixs=states_ixs, actions_ixs=actions_ixs)\n\n return outputs.logits, qs, target_qs, vs, outputs.past_key_values\n\n def generate(\n self,\n input_ids,\n attention_mask=None,\n position_ids=None,\n past_key_values=None,\n beta=1,\n max_new_tokens=32,\n max_length=1024,\n temperature=1,\n top_k=20,\n logit_mask=None,\n pad_token_id=None,\n eos_token_id=None,\n ):\n \"\"\"\n Generates samples akin to hf's `.generate` but with custom logp prepossessing:\n changing token probabilities as to how advantageous they would be\n according to value functions estimations.\n \"\"\"\n pad_token_id = pad_token_id if pad_token_id is not None else self.base_model.config.pad_token_id\n eos_token_id = eos_token_id if eos_token_id is not None else self.base_model.config.eos_token_id\n\n if attention_mask is None:\n attention_mask = input_ids.not_equal(pad_token_id)\n\n if position_ids is None:\n position_ids = attention_mask.cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask.eq(0), 0)\n\n samples = input_ids.clone()\n max_new_tokens = min(max_new_tokens, max_length - input_ids.shape[1])\n\n finished = torch.zeros(input_ids.shape[0], 1, dtype=torch.long, device=input_ids.device)\n for _ in range(max_new_tokens):\n out = self.forward(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n )\n\n logits, _, target_qs, vs, past_key_values = out\n if self.two_qs:\n qs = torch.minimum(target_qs[0][:, -1, :], target_qs[1][:, -1, :])\n else:\n qs = target_qs[:, -1, :]\n\n logits = logits[:, -1, :]\n vs = vs[:, -1, :]\n\n if logit_mask is not None:\n mask = logit_mask[input_ids[:, 
-1].squeeze().to(logit_mask.device)]\n logits[torch.where(mask)] = -np.inf\n\n adv = qs - vs\n pi_beta = F.log_softmax(logits, -1)\n pi_top_k = topk_mask(pi_beta + beta * adv, top_k)\n pi = F.softmax(pi_top_k / temperature, -1)\n\n input_ids = torch.multinomial(pi, num_samples=1)\n input_ids = (1 - finished) * input_ids + finished * eos_token_id\n finished = (input_ids == eos_token_id).long()\n\n samples = torch.hstack((samples, input_ids))\n attention_mask = torch.hstack((attention_mask, (input_ids != eos_token_id).long()))\n position_ids = (position_ids[:, -1] + 1).view(-1, 1)\n\n if torch.all(finished):\n break\n\n return samples\n\n def sync_target_q_heads(self):\n self.ilql_heads.sync_target_q_heads()\n\n def state_dict(self, *args, **kwargs):\n \"\"\"\n Returns the state dictionary of the model. We add the state dictionary of the ilql heads\n to the state dictionary of the wrapped model by prepending the key with `ilql_heads.`.\n \"\"\"\n base_model_state_dict = self.base_model.state_dict(*args, **kwargs)\n ilql_heads_state_dict = self.ilql_heads.state_dict(*args, **kwargs)\n for k, v in ilql_heads_state_dict.items():\n base_model_state_dict[f\"ilql_heads.{k}\"] = v\n return base_model_state_dict\n\n def post_init(self, state_dict):\n \"\"\"\n We add the state dictionary of the ilql heads to the state dictionary of the wrapped model\n by preprending the key with `ilql_heads.`. This function removes the `ilql_heads.` prefix from the\n keys of the value head state dictionary.\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/41", "ground_truth": " for k in list(state_dict.keys()):\n if \"ilql_heads.\" in k:\n state_dict[k.replace(\"ilql_heads.\", \"\")] = state_dict.pop(k)\n self.ilql_heads.load_state_dict(state_dict, strict=False)\n del state_dict\n gc.collect()\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_ilql.py"], "context_start_lineno": 0, "lineno": 335, "function_name": "post_init"}, "groundtruth": " for k in list(state_dict.keys()):\n if \"ilql_heads.\" in k:\n state_dict[k.replace(\"ilql_heads.\", \"\")] = state_dict.pop(k)\n self.ilql_heads.load_state_dict(state_dict, strict=False)\n del state_dict\n gc.collect()\n"} +{"prompt": "# Copyright 2022 CarperAI & The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# NOTE: This file contains a modified version of the `PreTrainedModelWrapper` class from\n# HuggingFace's `trl` library. 
The original source code can be found here:\n# https://github.com/lvwerra/trl/blob/78c13226bf8ea1ccd9b1c091f03a938098521f6c/trl/models/modeling_base.py\n\nimport inspect\nimport json\nimport os\nfrom typing import Any, Dict, List, Optional, Union\n\nimport torch\nimport torch.nn as nn\nimport transformers\nfrom huggingface_hub import hf_hub_download\n\n\nclass PreTrainedModelWrapper(nn.Module, transformers.utils.PushToHubMixin):\n \"\"\"A wrapper around `transformers.PreTrainedModel`\n\n Reference: @younesbelkada's `PreTrainedModelWrapper`\n https://github.com/lvwerra/trl/blob/4f5c16fafde42d9aca971952bcdcc1f5a0a68cf0/trl/models/modeling_base.py#L2\n\n Attributes:\n _auto_model_parent_class (transformers.AutoModel): The `transformers.AutoModel`\n type to base the wrapping behavior off of, e.g. `transformers.AutoModelForCausalLM`.\n _supported_modules (List[str]): A list of attribute names for modules of\n the underlying architecture model. This is used, for example, to save\n and load any additional modules by manipulating the state dict.\n _supported_args (List[str]): A list of arguments specific to the underlying\n architecture to separate from arguments that are supported by the\n parent `AutoModel` class. Any arguments that are not supported by the\n underlying model will be passed to the parent `AutoModel` class.\n \"\"\"\n\n _auto_model_parent_class: transformers.AutoModel = None\n _supported_modules: List[str] = None\n # TODO (jon-tow): Supported args should come from a `PretrainedConfig` of the\n # specific underlying type similar to how config instances can be used to instantiate\n # `transformers.PreTrainedModel`s.\n _supported_args: List[str] = None\n\n def __init__(self, base_model: Optional[transformers.PreTrainedModel] = None, **kwargs):", "metadata": {"task_id": "CarperAI--trlx/42", "ground_truth": " super().__init__()\n self.base_model = base_model\n # cache `forward` args for general use (avoids incompatible args across architectures)\n self.forward_kwargs = inspect.getfullargspec(self.base_model.forward).args\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_base.py"], "context_start_lineno": 0, "lineno": 55, "function_name": "__init__"}, "groundtruth": " super().__init__()\n self.base_model = base_model\n # cache `forward` args for general use (avoids incompatible args across architectures)\n self.forward_kwargs = inspect.getfullargspec(self.base_model.forward).args\n"} +{"prompt": "# Copyright 2022 CarperAI & The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# NOTE: This file contains a modified version of the `PreTrainedModelWrapper` class from\n# HuggingFace's `trl` library. 
The original source code can be found here:\n# https://github.com/lvwerra/trl/blob/78c13226bf8ea1ccd9b1c091f03a938098521f6c/trl/models/modeling_base.py\n\nimport inspect\nimport json\nimport os\nfrom typing import Any, Dict, List, Optional, Union\n\nimport torch\nimport torch.nn as nn\nimport transformers\nfrom huggingface_hub import hf_hub_download\n\n\nclass PreTrainedModelWrapper(nn.Module, transformers.utils.PushToHubMixin):\n \"\"\"A wrapper around `transformers.PreTrainedModel`\n\n Reference: @younesbelkada's `PreTrainedModelWrapper`\n https://github.com/lvwerra/trl/blob/4f5c16fafde42d9aca971952bcdcc1f5a0a68cf0/trl/models/modeling_base.py#L2\n\n Attributes:\n _auto_model_parent_class (transformers.AutoModel): The `transformers.AutoModel`\n type to base the wrapping behavior off of, e.g. `transformers.AutoModelForCausalLM`.\n _supported_modules (List[str]): A list of attribute names for modules of\n the underlying architecture model. This is used, for example, to save\n and load any additional modules by manipulating the state dict.\n _supported_args (List[str]): A list of arguments specific to the underlying\n architecture to separate from arguments that are supported by the\n parent `AutoModel` class. Any arguments that are not supported by the\n underlying model will be passed to the parent `AutoModel` class.\n \"\"\"\n\n _auto_model_parent_class: transformers.AutoModel = None\n _supported_modules: List[str] = None\n # TODO (jon-tow): Supported args should come from a `PretrainedConfig` of the\n # specific underlying type similar to how config instances can be used to instantiate\n # `transformers.PreTrainedModel`s.\n _supported_args: List[str] = None\n\n def __init__(self, base_model: Optional[transformers.PreTrainedModel] = None, **kwargs):\n super().__init__()\n self.base_model = base_model\n # cache `forward` args for general use (avoids incompatible args across architectures)\n self.forward_kwargs = inspect.getfullargspec(self.base_model.forward).args\n\n @classmethod\n def _split_kwargs(cls, kwargs: Dict[str, Any]):\n \"\"\"Separates the kwargs from the supported arguments within `supported_args`\n and those that are not\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/43", "ground_truth": " supported_kwargs = {}\n unsupported_kwargs = {}\n for key, value in kwargs.items():\n if key in cls._supported_args:\n supported_kwargs[key] = value\n else:\n unsupported_kwargs[key] = value\n return supported_kwargs, unsupported_kwargs\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_base.py"], "context_start_lineno": 0, "lineno": 65, "function_name": "_split_kwargs"}, "groundtruth": " supported_kwargs = {}\n unsupported_kwargs = {}\n for key, value in kwargs.items():\n if key in cls._supported_args:\n supported_kwargs[key] = value\n else:\n unsupported_kwargs[key] = value\n return supported_kwargs, unsupported_kwargs\n"} +{"prompt": "# Copyright 2022 CarperAI & The HuggingFace Team. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# NOTE: This file contains a modified version of the `PreTrainedModelWrapper` class from\n# HuggingFace's `trl` library. The original source code can be found here:\n# https://github.com/lvwerra/trl/blob/78c13226bf8ea1ccd9b1c091f03a938098521f6c/trl/models/modeling_base.py\n\nimport inspect\nimport json\nimport os\nfrom typing import Any, Dict, List, Optional, Union\n\nimport torch\nimport torch.nn as nn\nimport transformers\nfrom huggingface_hub import hf_hub_download\n\n\nclass PreTrainedModelWrapper(nn.Module, transformers.utils.PushToHubMixin):\n \"\"\"A wrapper around `transformers.PreTrainedModel`\n\n Reference: @younesbelkada's `PreTrainedModelWrapper`\n https://github.com/lvwerra/trl/blob/4f5c16fafde42d9aca971952bcdcc1f5a0a68cf0/trl/models/modeling_base.py#L2\n\n Attributes:\n _auto_model_parent_class (transformers.AutoModel): The `transformers.AutoModel`\n type to base the wrapping behavior off of, e.g. `transformers.AutoModelForCausalLM`.\n _supported_modules (List[str]): A list of attribute names for modules of\n the underlying architecture model. This is used, for example, to save\n and load any additional modules by manipulating the state dict.\n _supported_args (List[str]): A list of arguments specific to the underlying\n architecture to separate from arguments that are supported by the\n parent `AutoModel` class. Any arguments that are not supported by the\n underlying model will be passed to the parent `AutoModel` class.\n \"\"\"\n\n _auto_model_parent_class: transformers.AutoModel = None\n _supported_modules: List[str] = None\n # TODO (jon-tow): Supported args should come from a `PretrainedConfig` of the\n # specific underlying type similar to how config instances can be used to instantiate\n # `transformers.PreTrainedModel`s.\n _supported_args: List[str] = None\n\n def __init__(self, base_model: Optional[transformers.PreTrainedModel] = None, **kwargs):\n super().__init__()\n self.base_model = base_model\n # cache `forward` args for general use (avoids incompatible args across architectures)\n self.forward_kwargs = inspect.getfullargspec(self.base_model.forward).args\n\n @classmethod\n def _split_kwargs(cls, kwargs: Dict[str, Any]):\n \"\"\"Separates the kwargs from the supported arguments within `supported_args`\n and those that are not\n \"\"\"\n supported_kwargs = {}\n unsupported_kwargs = {}\n for key, value in kwargs.items():\n if key in cls._supported_args:\n supported_kwargs[key] = value\n else:\n unsupported_kwargs[key] = value\n return supported_kwargs, unsupported_kwargs\n\n @classmethod\n def from_config(cls, config: transformers.PretrainedConfig, **kwargs):\n \"\"\"Instantiate the pretrained pytorch model from a configuration.\n\n Args:\n config (transformers.PretrainedConfig): The configuration to use to\n instantiate the base model.\n\n NOTE: Loading a model from its configuration file does **not** load the\n model weights. It only affects the model's configuration. 
Use\n `~transformers.AutoModel.from_pretrained` to load the model weights.\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/44", "ground_truth": " if kwargs is not None:\n wrapped_model_kwargs, from_config_kwargs = cls._split_kwargs(kwargs)\n else:\n from_config_kwargs = {}\n wrapped_model_kwargs = {}\n base_model = cls._auto_model_parent_class.from_config(config, **from_config_kwargs)\n model = cls(base_model, **wrapped_model_kwargs)\n return model\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_base.py"], "context_start_lineno": 0, "lineno": 86, "function_name": "from_config"}, "groundtruth": " if kwargs is not None:\n wrapped_model_kwargs, from_config_kwargs = cls._split_kwargs(kwargs)\n else:\n from_config_kwargs = {}\n wrapped_model_kwargs = {}\n base_model = cls._auto_model_parent_class.from_config(config, **from_config_kwargs)\n model = cls(base_model, **wrapped_model_kwargs)\n return model\n"} +{"prompt": "# Copyright 2022 CarperAI & The HuggingFace Team. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# NOTE: This file contains a modified version of the `PreTrainedModelWrapper` class from\n# HuggingFace's `trl` library. The original source code can be found here:\n# https://github.com/lvwerra/trl/blob/78c13226bf8ea1ccd9b1c091f03a938098521f6c/trl/models/modeling_base.py\n\nimport inspect\nimport json\nimport os\nfrom typing import Any, Dict, List, Optional, Union\n\nimport torch\nimport torch.nn as nn\nimport transformers\nfrom huggingface_hub import hf_hub_download\n\n\nclass PreTrainedModelWrapper(nn.Module, transformers.utils.PushToHubMixin):\n \"\"\"A wrapper around `transformers.PreTrainedModel`\n\n Reference: @younesbelkada's `PreTrainedModelWrapper`\n https://github.com/lvwerra/trl/blob/4f5c16fafde42d9aca971952bcdcc1f5a0a68cf0/trl/models/modeling_base.py#L2\n\n Attributes:\n _auto_model_parent_class (transformers.AutoModel): The `transformers.AutoModel`\n type to base the wrapping behavior off of, e.g. `transformers.AutoModelForCausalLM`.\n _supported_modules (List[str]): A list of attribute names for modules of\n the underlying architecture model. This is used, for example, to save\n and load any additional modules by manipulating the state dict.\n _supported_args (List[str]): A list of arguments specific to the underlying\n architecture to separate from arguments that are supported by the\n parent `AutoModel` class. 
Any arguments that are not supported by the\n underlying model will be passed to the parent `AutoModel` class.\n \"\"\"\n\n _auto_model_parent_class: transformers.AutoModel = None\n _supported_modules: List[str] = None\n # TODO (jon-tow): Supported args should come from a `PretrainedConfig` of the\n # specific underlying type similar to how config instances can be used to instantiate\n # `transformers.PreTrainedModel`s.\n _supported_args: List[str] = None\n\n def __init__(self, base_model: Optional[transformers.PreTrainedModel] = None, **kwargs):\n super().__init__()\n self.base_model = base_model\n # cache `forward` args for general use (avoids incompatible args across architectures)\n self.forward_kwargs = inspect.getfullargspec(self.base_model.forward).args\n\n @classmethod\n def _split_kwargs(cls, kwargs: Dict[str, Any]):\n \"\"\"Separates the kwargs from the supported arguments within `supported_args`\n and those that are not\n \"\"\"\n supported_kwargs = {}\n unsupported_kwargs = {}\n for key, value in kwargs.items():\n if key in cls._supported_args:\n supported_kwargs[key] = value\n else:\n unsupported_kwargs[key] = value\n return supported_kwargs, unsupported_kwargs\n\n @classmethod\n def from_config(cls, config: transformers.PretrainedConfig, **kwargs):\n \"\"\"Instantiate the pretrained pytorch model from a configuration.\n\n Args:\n config (transformers.PretrainedConfig): The configuration to use to\n instantiate the base model.\n\n NOTE: Loading a model from its configuration file does **not** load the\n model weights. It only affects the model's configuration. Use\n `~transformers.AutoModel.from_pretrained` to load the model weights.\n \"\"\"\n if kwargs is not None:\n wrapped_model_kwargs, from_config_kwargs = cls._split_kwargs(kwargs)\n else:\n from_config_kwargs = {}\n wrapped_model_kwargs = {}\n base_model = cls._auto_model_parent_class.from_config(config, **from_config_kwargs)\n model = cls(base_model, **wrapped_model_kwargs)\n return model\n\n @classmethod\n def from_pretrained( # noqa: max-complexity\n cls,\n pretrained_model_name_or_path: Union[str, transformers.PreTrainedModel],\n *model_args,\n **kwargs,\n ):\n \"\"\"Instantiate a pretrained pytorch model from a pretrained model configuration.\n This method is a wrapper around `transformers.PreTrainedModel.from_pretrained`.\n Please refer to the documentation of `transformers.PreTrainedModel.from_pretrained`\n for more information.\n\n Args:\n pretrained_model_name_or_path (str or `transformers.PreTrainedModel`):\n The identifier of the pretrained model to load or the pretrained model itself.\n *model_args (sequence of positional arguments, *optional*):\n All remaining positional arguments will be passed to the `_auto_model_parent_class`.\n **kwargs (dict, *optional*):\n Dictionary of keyword arguments to pass to both the underlying `_auto_model_parent_class`\n call (e.g. `transformers.AutoModelForCausalLM.from_pretrained`) and the specific\n instance of the wrapped model.\n\n NOTE: You must pass in arguments specific to the wrapped model as keyword arguments.\n \"\"\"\n if kwargs is not None:\n wrapped_model_kwargs, from_pretrained_kwargs = cls._split_kwargs(kwargs)\n else:\n from_pretrained_kwargs = {}\n wrapped_model_kwargs = {}\n\n if isinstance(pretrained_model_name_or_path, str):\n # Load the base model using the `transformers` AutoClass (e.g. 
AutoModelForCausalLM)\n base_model = cls._auto_model_parent_class.from_pretrained(\n pretrained_model_name_or_path, *model_args, **from_pretrained_kwargs\n )\n elif isinstance(pretrained_model_name_or_path, transformers.PreTrainedModel):\n base_model = pretrained_model_name_or_path\n else:\n raise ValueError(\n f\"Invalid type for `base_model_name_or_path`: {type(pretrained_model_name_or_path)}\"\n \"Expected `str` or `transformers.PreTrainedModel`.\"\n )\n\n model = cls(base_model, **wrapped_model_kwargs)\n\n if isinstance(pretrained_model_name_or_path, str):\n filename = os.path.join(pretrained_model_name_or_path, \"pytorch_model.bin\")\n sharded_index_filename = os.path.join(pretrained_model_name_or_path, \"pytorch_model.bin.index.json\")\n is_sharded = False\n\n if not os.path.exists(filename):\n try:\n filename = hf_hub_download(pretrained_model_name_or_path, \"pytorch_model.bin\")\n # Sharded\n except Exception:\n if os.path.exists(sharded_index_filename):\n index_file_name = sharded_index_filename\n else:\n index_file_name = hf_hub_download(\n pretrained_model_name_or_path,\n \"pytorch_model.bin.index.json\",\n )\n with open(index_file_name, \"r\") as f:\n index = json.load(f)\n # Collect files containing weights from supported modules\n files_to_download = set()\n for k, v in index[\"weight_map\"].items():\n if any([module in k for module in cls._supported_modules]):\n files_to_download.add(v)\n is_sharded = True\n\n if is_sharded:\n # Merge each shard into a state dict\n # TODO: Optimize this to avoid wasting RAM\n state_dict = {}\n for shard_file in files_to_download:\n filename = os.path.join(pretrained_model_name_or_path, shard_file)\n # Download if shard file doesn't exist locally\n if not os.path.exists(filename):\n filename = hf_hub_download(pretrained_model_name_or_path, shard_file)\n state_dict.update(torch.load(filename, map_location=\"cpu\"))\n else:\n state_dict = torch.load(filename, map_location=\"cpu\")\n else:\n state_dict = pretrained_model_name_or_path.state_dict()\n\n model.post_init(state_dict=state_dict)\n return model\n\n def save_pretrained(self, *args, **kwargs):\n \"\"\"Save the pretrained model to a directory. This method is a wrapper\n around `transformers.PreTrainedModel.save_pretrained`. 
Please refer to\n the documentation of `transformers.PreTrainedModel.save_pretrained` for\n more information.\n\n Args:\n *args (`list`, *optional*):\n Positional arguments passed along to the underlying model's\n `save_pretrained` method.\n **kwargs (`dict`, *optional*):\n Keyword arguments passed along to the underlying model's\n `save_pretrained` method.\n \"\"\"", "metadata": {"task_id": "CarperAI--trlx/45", "ground_truth": " state_dict = kwargs.pop(\"state_dict\", None)\n if state_dict is None:\n state_dict = self.state_dict()\n kwargs[\"state_dict\"] = state_dict\n\n return self.base_model.save_pretrained(*args, **kwargs)\n", "fpath_tuple": ["CarperAI_trlx", "trlx", "models", "modeling_base.py"], "context_start_lineno": 0, "lineno": 198, "function_name": "save_pretrained"}, "groundtruth": " state_dict = kwargs.pop(\"state_dict\", None)\n if state_dict is None:\n state_dict = self.state_dict()\n kwargs[\"state_dict\"] = state_dict\n\n return self.base_model.save_pretrained(*args, **kwargs)\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/0", "ground_truth": " if len(arr) == 0:\n return d\n return arr[0]\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 41, "function_name": "first"}, "groundtruth": " if len(arr) == 0:\n return d\n return arr[0]\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n 
return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)", "metadata": {"task_id": "lucidrains--imagen-pytorch/1", "ground_truth": " def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 47, "function_name": "maybe"}, "groundtruth": " def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):", "metadata": {"task_id": "lucidrains--imagen-pytorch/2", "ground_truth": " if not exists(x):\n return x\n return fn(x)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 48, "function_name": "inner"}, "groundtruth": " if not exists(x):\n return x\n return fn(x)\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):", "metadata": {"task_id": "lucidrains--imagen-pytorch/3", 
"ground_truth": " called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 54, "function_name": "once"}, "groundtruth": " called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):\n called = False\n @wraps(fn)\n def inner(x):", "metadata": {"task_id": "lucidrains--imagen-pytorch/4", "ground_truth": " nonlocal called\n if called:\n return\n called = True\n return fn(x)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 57, "function_name": "inner"}, "groundtruth": " nonlocal called\n if called:\n return\n called = True\n return fn(x)\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):\n called = False\n 
@wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n\nprint_once = once(print)\n\ndef default(val, d):", "metadata": {"task_id": "lucidrains--imagen-pytorch/5", "ground_truth": " if exists(val):\n return val\n return d() if callable(d) else d\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 67, "function_name": "default"}, "groundtruth": " if exists(val):\n return val\n return d() if callable(d) else d\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):\n called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n\nprint_once = once(print)\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/6", "ground_truth": " if isinstance(val, list):\n val = tuple(val)\n\n output = val if isinstance(val, tuple) else ((val,) * default(length, 1))\n\n if exists(length):\n assert len(output) == length\n\n return output\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 72, "function_name": "cast_tuple"}, "groundtruth": " if isinstance(val, list):\n val = tuple(val)\n\n output = val if isinstance(val, tuple) else ((val,) * default(length, 1))\n\n if exists(length):\n assert len(output) == length\n\n return output\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, 
repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):\n called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n\nprint_once = once(print)\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = None):\n if isinstance(val, list):\n val = tuple(val)\n\n output = val if isinstance(val, tuple) else ((val,) * default(length, 1))\n\n if exists(length):\n assert len(output) == length\n\n return output\n\ndef compact(input_dict):\n return {key: value for key, value in input_dict.items() if exists(value)}\n\ndef maybe_transform_dict_key(input_dict, key, fn):\n if key not in input_dict:\n return input_dict\n\n copied_dict = input_dict.copy()\n copied_dict[key] = fn(copied_dict[key])\n return copied_dict\n\ndef cast_uint8_images_to_float(images):", "metadata": {"task_id": "lucidrains--imagen-pytorch/7", "ground_truth": " if not images.dtype == torch.uint8:\n return images\n return images / 255\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 94, "function_name": "cast_uint8_images_to_float"}, "groundtruth": " if not images.dtype == torch.uint8:\n return images\n return images / 255\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):\n called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n\nprint_once = once(print)\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = None):\n if isinstance(val, list):\n val = tuple(val)\n\n output = val if isinstance(val, tuple) else ((val,) * 
default(length, 1))\n\n if exists(length):\n assert len(output) == length\n\n return output\n\ndef compact(input_dict):\n return {key: value for key, value in input_dict.items() if exists(value)}\n\ndef maybe_transform_dict_key(input_dict, key, fn):\n if key not in input_dict:\n return input_dict\n\n copied_dict = input_dict.copy()\n copied_dict[key] = fn(copied_dict[key])\n return copied_dict\n\ndef cast_uint8_images_to_float(images):\n if not images.dtype == torch.uint8:\n return images\n return images / 255\n\ndef module_device(module):\n return next(module.parameters()).device\n\ndef zero_init_(m):", "metadata": {"task_id": "lucidrains--imagen-pytorch/8", "ground_truth": " nn.init.zeros_(m.weight)\n if exists(m.bias):\n nn.init.zeros_(m.bias)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 102, "function_name": "zero_init_"}, "groundtruth": " nn.init.zeros_(m.weight)\n if exists(m.bias):\n nn.init.zeros_(m.bias)\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):\n called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n\nprint_once = once(print)\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = None):\n if isinstance(val, list):\n val = tuple(val)\n\n output = val if isinstance(val, tuple) else ((val,) * default(length, 1))\n\n if exists(length):\n assert len(output) == length\n\n return output\n\ndef compact(input_dict):\n return {key: value for key, value in input_dict.items() if exists(value)}\n\ndef maybe_transform_dict_key(input_dict, key, fn):\n if key not in input_dict:\n return input_dict\n\n copied_dict = input_dict.copy()\n copied_dict[key] = fn(copied_dict[key])\n return copied_dict\n\ndef cast_uint8_images_to_float(images):\n if not images.dtype == torch.uint8:\n return images\n return images / 255\n\ndef module_device(module):\n return next(module.parameters()).device\n\ndef zero_init_(m):\n nn.init.zeros_(m.weight)\n if exists(m.bias):\n nn.init.zeros_(m.bias)\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, 
**kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef pad_tuple_to_length(t, length, fillvalue = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/9", "ground_truth": " remain_length = length - len(t)\n if remain_length <= 0:\n return t\n return (*t, *((fillvalue,) * remain_length))\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 116, "function_name": "pad_tuple_to_length"}, "groundtruth": " remain_length = length - len(t)\n if remain_length <= 0:\n return t\n return (*t, *((fillvalue,) * remain_length))\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):\n called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n\nprint_once = once(print)\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = None):\n if isinstance(val, list):\n val = tuple(val)\n\n output = val if isinstance(val, tuple) else ((val,) * default(length, 1))\n\n if exists(length):\n assert len(output) == length\n\n return output\n\ndef compact(input_dict):\n return {key: value for key, value in input_dict.items() if exists(value)}\n\ndef maybe_transform_dict_key(input_dict, key, fn):\n if key not in input_dict:\n return input_dict\n\n copied_dict = input_dict.copy()\n copied_dict[key] = fn(copied_dict[key])\n return copied_dict\n\ndef cast_uint8_images_to_float(images):\n if not images.dtype == torch.uint8:\n return images\n return images / 255\n\ndef module_device(module):\n return next(module.parameters()).device\n\ndef zero_init_(m):\n nn.init.zeros_(m.weight)\n if exists(m.bias):\n nn.init.zeros_(m.bias)\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef pad_tuple_to_length(t, length, fillvalue = None):\n remain_length = length - len(t)\n if remain_length <= 0:\n return t\n return (*t, *((fillvalue,) * remain_length))\n\n# helper classes\n\nclass Identity(nn.Module):\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n def forward(self, x, *args, **kwargs):\n 
return x\n\n# tensor helpers\n\ndef log(t, eps: float = 1e-12):\n return torch.log(t.clamp(min = eps))\n\ndef l2norm(t):\n return F.normalize(t, dim = -1)\n\ndef right_pad_dims_to(x, t):", "metadata": {"task_id": "lucidrains--imagen-pytorch/10", "ground_truth": " padding_dims = x.ndim - t.ndim\n if padding_dims <= 0:\n return t\n return t.view(*t.shape, *((1,) * padding_dims))\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 139, "function_name": "right_pad_dims_to"}, "groundtruth": " padding_dims = x.ndim - t.ndim\n if padding_dims <= 0:\n return t\n return t.view(*t.shape, *((1,) * padding_dims))\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):\n called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n\nprint_once = once(print)\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = None):\n if isinstance(val, list):\n val = tuple(val)\n\n output = val if isinstance(val, tuple) else ((val,) * default(length, 1))\n\n if exists(length):\n assert len(output) == length\n\n return output\n\ndef compact(input_dict):\n return {key: value for key, value in input_dict.items() if exists(value)}\n\ndef maybe_transform_dict_key(input_dict, key, fn):\n if key not in input_dict:\n return input_dict\n\n copied_dict = input_dict.copy()\n copied_dict[key] = fn(copied_dict[key])\n return copied_dict\n\ndef cast_uint8_images_to_float(images):\n if not images.dtype == torch.uint8:\n return images\n return images / 255\n\ndef module_device(module):\n return next(module.parameters()).device\n\ndef zero_init_(m):\n nn.init.zeros_(m.weight)\n if exists(m.bias):\n nn.init.zeros_(m.bias)\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef pad_tuple_to_length(t, length, fillvalue = None):\n remain_length = length - len(t)\n if remain_length <= 0:\n return t\n return (*t, *((fillvalue,) * remain_length))\n\n# helper classes\n\nclass Identity(nn.Module):\n def __init__(self, *args, **kwargs):\n 
super().__init__()\n\n def forward(self, x, *args, **kwargs):\n return x\n\n# tensor helpers\n\ndef log(t, eps: float = 1e-12):\n return torch.log(t.clamp(min = eps))\n\ndef l2norm(t):\n return F.normalize(t, dim = -1)\n\ndef right_pad_dims_to(x, t):\n padding_dims = x.ndim - t.ndim\n if padding_dims <= 0:\n return t\n return t.view(*t.shape, *((1,) * padding_dims))\n\ndef masked_mean(t, *, dim, mask = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/11", "ground_truth": " if not exists(mask):\n return t.mean(dim = dim)\n\n denom = mask.sum(dim = dim, keepdim = True)\n mask = rearrange(mask, 'b n -> b n 1')\n masked_t = t.masked_fill(~mask, 0.)\n\n return masked_t.sum(dim = dim) / denom.clamp(min = 1e-5)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 145, "function_name": "masked_mean"}, "groundtruth": " if not exists(mask):\n return t.mean(dim = dim)\n\n denom = mask.sum(dim = dim, keepdim = True)\n mask = rearrange(mask, 'b n -> b n 1')\n masked_t = t.masked_fill(~mask, 0.)\n\n return masked_t.sum(dim = dim) / denom.clamp(min = 1e-5)\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):\n called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n\nprint_once = once(print)\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = None):\n if isinstance(val, list):\n val = tuple(val)\n\n output = val if isinstance(val, tuple) else ((val,) * default(length, 1))\n\n if exists(length):\n assert len(output) == length\n\n return output\n\ndef compact(input_dict):\n return {key: value for key, value in input_dict.items() if exists(value)}\n\ndef maybe_transform_dict_key(input_dict, key, fn):\n if key not in input_dict:\n return input_dict\n\n copied_dict = input_dict.copy()\n copied_dict[key] = fn(copied_dict[key])\n return copied_dict\n\ndef cast_uint8_images_to_float(images):\n if not images.dtype == torch.uint8:\n return images\n return images / 255\n\ndef module_device(module):\n return next(module.parameters()).device\n\ndef zero_init_(m):\n nn.init.zeros_(m.weight)\n if exists(m.bias):\n nn.init.zeros_(m.bias)\n\ndef 
eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef pad_tuple_to_length(t, length, fillvalue = None):\n remain_length = length - len(t)\n if remain_length <= 0:\n return t\n return (*t, *((fillvalue,) * remain_length))\n\n# helper classes\n\nclass Identity(nn.Module):\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n def forward(self, x, *args, **kwargs):\n return x\n\n# tensor helpers\n\ndef log(t, eps: float = 1e-12):\n return torch.log(t.clamp(min = eps))\n\ndef l2norm(t):\n return F.normalize(t, dim = -1)\n\ndef right_pad_dims_to(x, t):\n padding_dims = x.ndim - t.ndim\n if padding_dims <= 0:\n return t\n return t.view(*t.shape, *((1,) * padding_dims))\n\ndef masked_mean(t, *, dim, mask = None):\n if not exists(mask):\n return t.mean(dim = dim)\n\n denom = mask.sum(dim = dim, keepdim = True)\n mask = rearrange(mask, 'b n -> b n 1')\n masked_t = t.masked_fill(~mask, 0.)\n\n return masked_t.sum(dim = dim) / denom.clamp(min = 1e-5)\n\ndef resize_image_to(\n image,\n target_image_size,\n clamp_range = None,\n mode = 'nearest'\n):\n orig_image_size = image.shape[-1]\n\n if orig_image_size == target_image_size:\n return image\n\n out = F.interpolate(image, target_image_size, mode = mode)\n\n if exists(clamp_range):\n out = out.clamp(*clamp_range)\n\n return out\n\ndef calc_all_frame_dims(\n downsample_factors: List[int],\n frames\n):\n if not exists(frames):\n return (tuple(),) * len(downsample_factors)\n\n all_frame_dims = []\n\n for divisor in downsample_factors:\n assert divisible_by(frames, divisor)\n all_frame_dims.append((frames // divisor,))\n\n return all_frame_dims\n\ndef safe_get_tuple_index(tup, index, default = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/12", "ground_truth": " if len(tup) <= index:\n return default\n return tup[index]\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 188, "function_name": "safe_get_tuple_index"}, "groundtruth": " if len(tup) <= index:\n return default\n return tup[index]\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):\n called = False\n 
@wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n\nprint_once = once(print)\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = None):\n if isinstance(val, list):\n val = tuple(val)\n\n output = val if isinstance(val, tuple) else ((val,) * default(length, 1))\n\n if exists(length):\n assert len(output) == length\n\n return output\n\ndef compact(input_dict):\n return {key: value for key, value in input_dict.items() if exists(value)}\n\ndef maybe_transform_dict_key(input_dict, key, fn):\n if key not in input_dict:\n return input_dict\n\n copied_dict = input_dict.copy()\n copied_dict[key] = fn(copied_dict[key])\n return copied_dict\n\ndef cast_uint8_images_to_float(images):\n if not images.dtype == torch.uint8:\n return images\n return images / 255\n\ndef module_device(module):\n return next(module.parameters()).device\n\ndef zero_init_(m):\n nn.init.zeros_(m.weight)\n if exists(m.bias):\n nn.init.zeros_(m.bias)\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef pad_tuple_to_length(t, length, fillvalue = None):\n remain_length = length - len(t)\n if remain_length <= 0:\n return t\n return (*t, *((fillvalue,) * remain_length))\n\n# helper classes\n\nclass Identity(nn.Module):\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n def forward(self, x, *args, **kwargs):\n return x\n\n# tensor helpers\n\ndef log(t, eps: float = 1e-12):\n return torch.log(t.clamp(min = eps))\n\ndef l2norm(t):\n return F.normalize(t, dim = -1)\n\ndef right_pad_dims_to(x, t):\n padding_dims = x.ndim - t.ndim\n if padding_dims <= 0:\n return t\n return t.view(*t.shape, *((1,) * padding_dims))\n\ndef masked_mean(t, *, dim, mask = None):\n if not exists(mask):\n return t.mean(dim = dim)\n\n denom = mask.sum(dim = dim, keepdim = True)\n mask = rearrange(mask, 'b n -> b n 1')\n masked_t = t.masked_fill(~mask, 0.)\n\n return masked_t.sum(dim = dim) / denom.clamp(min = 1e-5)\n\ndef resize_image_to(\n image,\n target_image_size,\n clamp_range = None,\n mode = 'nearest'\n):\n orig_image_size = image.shape[-1]\n\n if orig_image_size == target_image_size:\n return image\n\n out = F.interpolate(image, target_image_size, mode = mode)\n\n if exists(clamp_range):\n out = out.clamp(*clamp_range)\n\n return out\n\ndef calc_all_frame_dims(\n downsample_factors: List[int],\n frames\n):\n if not exists(frames):\n return (tuple(),) * len(downsample_factors)\n\n all_frame_dims = []\n\n for divisor in downsample_factors:\n assert divisible_by(frames, divisor)\n all_frame_dims.append((frames // divisor,))\n\n return all_frame_dims\n\ndef safe_get_tuple_index(tup, index, default = None):\n if len(tup) <= index:\n return default\n return tup[index]\n\n# image normalization functions\n# ddpms expect images to be in the range of -1 to 1\n\ndef normalize_neg_one_to_one(img):\n return img * 2 - 1\n\ndef unnormalize_zero_to_one(normed_img):\n return (normed_img + 1) * 0.5\n\n# classifier free guidance functions\n\ndef prob_mask_like(shape, prob, device):", "metadata": {"task_id": "lucidrains--imagen-pytorch/13", "ground_truth": " if prob == 1:\n return torch.ones(shape, device = device, dtype = torch.bool)\n elif prob == 0:\n return torch.zeros(shape, device = device, dtype = torch.bool)\n else:\n return torch.zeros(shape, device = 
device).float().uniform_(0, 1) < prob\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 204, "function_name": "prob_mask_like"}, "groundtruth": " if prob == 1:\n return torch.ones(shape, device = device, dtype = torch.bool)\n elif prob == 0:\n return torch.zeros(shape, device = device, dtype = torch.bool)\n else:\n return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):\n called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n\nprint_once = once(print)\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = None):\n if isinstance(val, list):\n val = tuple(val)\n\n output = val if isinstance(val, tuple) else ((val,) * default(length, 1))\n\n if exists(length):\n assert len(output) == length\n\n return output\n\ndef compact(input_dict):\n return {key: value for key, value in input_dict.items() if exists(value)}\n\ndef maybe_transform_dict_key(input_dict, key, fn):\n if key not in input_dict:\n return input_dict\n\n copied_dict = input_dict.copy()\n copied_dict[key] = fn(copied_dict[key])\n return copied_dict\n\ndef cast_uint8_images_to_float(images):\n if not images.dtype == torch.uint8:\n return images\n return images / 255\n\ndef module_device(module):\n return next(module.parameters()).device\n\ndef zero_init_(m):\n nn.init.zeros_(m.weight)\n if exists(m.bias):\n nn.init.zeros_(m.bias)\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef pad_tuple_to_length(t, length, fillvalue = None):\n remain_length = length - len(t)\n if remain_length <= 0:\n return t\n return (*t, *((fillvalue,) * remain_length))\n\n# helper classes\n\nclass Identity(nn.Module):\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n def forward(self, x, *args, **kwargs):\n return x\n\n# tensor helpers\n\ndef log(t, eps: float = 1e-12):\n return torch.log(t.clamp(min = eps))\n\ndef l2norm(t):\n return F.normalize(t, dim = 
-1)\n\ndef right_pad_dims_to(x, t):\n padding_dims = x.ndim - t.ndim\n if padding_dims <= 0:\n return t\n return t.view(*t.shape, *((1,) * padding_dims))\n\ndef masked_mean(t, *, dim, mask = None):\n if not exists(mask):\n return t.mean(dim = dim)\n\n denom = mask.sum(dim = dim, keepdim = True)\n mask = rearrange(mask, 'b n -> b n 1')\n masked_t = t.masked_fill(~mask, 0.)\n\n return masked_t.sum(dim = dim) / denom.clamp(min = 1e-5)\n\ndef resize_image_to(\n image,\n target_image_size,\n clamp_range = None,\n mode = 'nearest'\n):\n orig_image_size = image.shape[-1]\n\n if orig_image_size == target_image_size:\n return image\n\n out = F.interpolate(image, target_image_size, mode = mode)\n\n if exists(clamp_range):\n out = out.clamp(*clamp_range)\n\n return out\n\ndef calc_all_frame_dims(\n downsample_factors: List[int],\n frames\n):\n if not exists(frames):\n return (tuple(),) * len(downsample_factors)\n\n all_frame_dims = []\n\n for divisor in downsample_factors:\n assert divisible_by(frames, divisor)\n all_frame_dims.append((frames // divisor,))\n\n return all_frame_dims\n\ndef safe_get_tuple_index(tup, index, default = None):\n if len(tup) <= index:\n return default\n return tup[index]\n\n# image normalization functions\n# ddpms expect images to be in the range of -1 to 1\n\ndef normalize_neg_one_to_one(img):\n return img * 2 - 1\n\ndef unnormalize_zero_to_one(normed_img):\n return (normed_img + 1) * 0.5\n\n# classifier free guidance functions\n\ndef prob_mask_like(shape, prob, device):\n if prob == 1:\n return torch.ones(shape, device = device, dtype = torch.bool)\n elif prob == 0:\n return torch.zeros(shape, device = device, dtype = torch.bool)\n else:\n return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob\n\n# gaussian diffusion with continuous time helper functions and classes\n# large part of this was thanks to @crowsonkb at https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/utils.py\n\n@torch.jit.script\ndef beta_linear_log_snr(t):\n return -torch.log(expm1(1e-4 + 10 * (t ** 2)))\n\n@torch.jit.script\ndef alpha_cosine_log_snr(t, s: float = 0.008):\n return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5) # not sure if this accounts for beta being clipped to 0.999 in discrete version\n\ndef log_snr_to_alpha_sigma(log_snr):\n return torch.sqrt(torch.sigmoid(log_snr)), torch.sqrt(torch.sigmoid(-log_snr))\n\nclass GaussianDiffusionContinuousTimes(nn.Module):\n def __init__(self, *, noise_schedule, timesteps = 1000):", "metadata": {"task_id": "lucidrains--imagen-pytorch/14", "ground_truth": " super().__init__()\n\n if noise_schedule == \"linear\":\n self.log_snr = beta_linear_log_snr\n elif noise_schedule == \"cosine\":\n self.log_snr = alpha_cosine_log_snr\n else:\n raise ValueError(f'invalid noise schedule {noise_schedule}')\n\n self.num_timesteps = timesteps\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 227, "function_name": "__init__"}, "groundtruth": " super().__init__()\n\n if noise_schedule == \"linear\":\n self.log_snr = beta_linear_log_snr\n elif noise_schedule == \"cosine\":\n self.log_snr = alpha_cosine_log_snr\n else:\n raise ValueError(f'invalid noise schedule {noise_schedule}')\n\n self.num_timesteps = timesteps\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib 
import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):\n called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n\nprint_once = once(print)\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = None):\n if isinstance(val, list):\n val = tuple(val)\n\n output = val if isinstance(val, tuple) else ((val,) * default(length, 1))\n\n if exists(length):\n assert len(output) == length\n\n return output\n\ndef compact(input_dict):\n return {key: value for key, value in input_dict.items() if exists(value)}\n\ndef maybe_transform_dict_key(input_dict, key, fn):\n if key not in input_dict:\n return input_dict\n\n copied_dict = input_dict.copy()\n copied_dict[key] = fn(copied_dict[key])\n return copied_dict\n\ndef cast_uint8_images_to_float(images):\n if not images.dtype == torch.uint8:\n return images\n return images / 255\n\ndef module_device(module):\n return next(module.parameters()).device\n\ndef zero_init_(m):\n nn.init.zeros_(m.weight)\n if exists(m.bias):\n nn.init.zeros_(m.bias)\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef pad_tuple_to_length(t, length, fillvalue = None):\n remain_length = length - len(t)\n if remain_length <= 0:\n return t\n return (*t, *((fillvalue,) * remain_length))\n\n# helper classes\n\nclass Identity(nn.Module):\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n def forward(self, x, *args, **kwargs):\n return x\n\n# tensor helpers\n\ndef log(t, eps: float = 1e-12):\n return torch.log(t.clamp(min = eps))\n\ndef l2norm(t):\n return F.normalize(t, dim = -1)\n\ndef right_pad_dims_to(x, t):\n padding_dims = x.ndim - t.ndim\n if padding_dims <= 0:\n return t\n return t.view(*t.shape, *((1,) * padding_dims))\n\ndef masked_mean(t, *, dim, mask = None):\n if not exists(mask):\n return t.mean(dim = dim)\n\n denom = mask.sum(dim = dim, keepdim = True)\n mask = rearrange(mask, 'b n -> b n 1')\n masked_t = t.masked_fill(~mask, 0.)\n\n return masked_t.sum(dim = dim) / denom.clamp(min = 1e-5)\n\ndef resize_image_to(\n image,\n target_image_size,\n clamp_range = None,\n mode = 'nearest'\n):\n orig_image_size = image.shape[-1]\n\n if orig_image_size == target_image_size:\n return image\n\n out = F.interpolate(image, target_image_size, mode = 
mode)\n\n if exists(clamp_range):\n out = out.clamp(*clamp_range)\n\n return out\n\ndef calc_all_frame_dims(\n downsample_factors: List[int],\n frames\n):\n if not exists(frames):\n return (tuple(),) * len(downsample_factors)\n\n all_frame_dims = []\n\n for divisor in downsample_factors:\n assert divisible_by(frames, divisor)\n all_frame_dims.append((frames // divisor,))\n\n return all_frame_dims\n\ndef safe_get_tuple_index(tup, index, default = None):\n if len(tup) <= index:\n return default\n return tup[index]\n\n# image normalization functions\n# ddpms expect images to be in the range of -1 to 1\n\ndef normalize_neg_one_to_one(img):\n return img * 2 - 1\n\ndef unnormalize_zero_to_one(normed_img):\n return (normed_img + 1) * 0.5\n\n# classifier free guidance functions\n\ndef prob_mask_like(shape, prob, device):\n if prob == 1:\n return torch.ones(shape, device = device, dtype = torch.bool)\n elif prob == 0:\n return torch.zeros(shape, device = device, dtype = torch.bool)\n else:\n return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob\n\n# gaussian diffusion with continuous time helper functions and classes\n# large part of this was thanks to @crowsonkb at https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/utils.py\n\n@torch.jit.script\ndef beta_linear_log_snr(t):\n return -torch.log(expm1(1e-4 + 10 * (t ** 2)))\n\n@torch.jit.script\ndef alpha_cosine_log_snr(t, s: float = 0.008):\n return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5) # not sure if this accounts for beta being clipped to 0.999 in discrete version\n\ndef log_snr_to_alpha_sigma(log_snr):\n return torch.sqrt(torch.sigmoid(log_snr)), torch.sqrt(torch.sigmoid(-log_snr))\n\nclass GaussianDiffusionContinuousTimes(nn.Module):\n def __init__(self, *, noise_schedule, timesteps = 1000):\n super().__init__()\n\n if noise_schedule == \"linear\":\n self.log_snr = beta_linear_log_snr\n elif noise_schedule == \"cosine\":\n self.log_snr = alpha_cosine_log_snr\n else:\n raise ValueError(f'invalid noise schedule {noise_schedule}')\n\n self.num_timesteps = timesteps\n\n def get_times(self, batch_size, noise_level, *, device):\n return torch.full((batch_size,), noise_level, device = device, dtype = torch.float32)\n\n def sample_random_times(self, batch_size, *, device):\n return torch.zeros((batch_size,), device = device).float().uniform_(0, 1)\n\n def get_condition(self, times):\n return maybe(self.log_snr)(times)\n\n def get_sampling_timesteps(self, batch, *, device):\n times = torch.linspace(1., 0., self.num_timesteps + 1, device = device)\n times = repeat(times, 't -> b t', b = batch)\n times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)\n times = times.unbind(dim = -1)\n return times\n\n def q_posterior(self, x_start, x_t, t, *, t_next = None):\n t_next = default(t_next, lambda: (t - 1. / self.num_timesteps).clamp(min = 0.))\n\n \"\"\" https://openreview.net/attachment?id=2LdBqxc1Yv&name=supplementary_material \"\"\"\n log_snr = self.log_snr(t)\n log_snr_next = self.log_snr(t_next)\n log_snr, log_snr_next = map(partial(right_pad_dims_to, x_t), (log_snr, log_snr_next))\n\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)\n\n # c - as defined near eq 33\n c = -expm1(log_snr - log_snr_next)\n posterior_mean = alpha_next * (x_t * (1 - c) / alpha + c * x_start)\n\n # following (eq. 
33)\n posterior_variance = (sigma_next ** 2) * c\n posterior_log_variance_clipped = log(posterior_variance, eps = 1e-20)\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def q_sample(self, x_start, t, noise = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/15", "ground_truth": " dtype = x_start.dtype\n\n if isinstance(t, float):\n batch = x_start.shape[0]\n t = torch.full((batch,), t, device = x_start.device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_start))\n log_snr = self.log_snr(t).type(dtype)\n log_snr_padded_dim = right_pad_dims_to(x_start, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n return alpha * x_start + sigma * noise, log_snr, alpha, sigma\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 275, "function_name": "q_sample"}, "groundtruth": " dtype = x_start.dtype\n\n if isinstance(t, float):\n batch = x_start.shape[0]\n t = torch.full((batch,), t, device = x_start.device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_start))\n log_snr = self.log_snr(t).type(dtype)\n log_snr_padded_dim = right_pad_dims_to(x_start, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n return alpha * x_start + sigma * noise, log_snr, alpha, sigma\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):\n called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n\nprint_once = once(print)\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = None):\n if isinstance(val, list):\n val = tuple(val)\n\n output = val if isinstance(val, tuple) else ((val,) * default(length, 1))\n\n if exists(length):\n assert len(output) == length\n\n return output\n\ndef compact(input_dict):\n return {key: value for key, value in input_dict.items() if exists(value)}\n\ndef maybe_transform_dict_key(input_dict, key, fn):\n if key not in input_dict:\n return input_dict\n\n copied_dict = input_dict.copy()\n copied_dict[key] = fn(copied_dict[key])\n return copied_dict\n\ndef cast_uint8_images_to_float(images):\n if not images.dtype == 
torch.uint8:\n return images\n return images / 255\n\ndef module_device(module):\n return next(module.parameters()).device\n\ndef zero_init_(m):\n nn.init.zeros_(m.weight)\n if exists(m.bias):\n nn.init.zeros_(m.bias)\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef pad_tuple_to_length(t, length, fillvalue = None):\n remain_length = length - len(t)\n if remain_length <= 0:\n return t\n return (*t, *((fillvalue,) * remain_length))\n\n# helper classes\n\nclass Identity(nn.Module):\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n def forward(self, x, *args, **kwargs):\n return x\n\n# tensor helpers\n\ndef log(t, eps: float = 1e-12):\n return torch.log(t.clamp(min = eps))\n\ndef l2norm(t):\n return F.normalize(t, dim = -1)\n\ndef right_pad_dims_to(x, t):\n padding_dims = x.ndim - t.ndim\n if padding_dims <= 0:\n return t\n return t.view(*t.shape, *((1,) * padding_dims))\n\ndef masked_mean(t, *, dim, mask = None):\n if not exists(mask):\n return t.mean(dim = dim)\n\n denom = mask.sum(dim = dim, keepdim = True)\n mask = rearrange(mask, 'b n -> b n 1')\n masked_t = t.masked_fill(~mask, 0.)\n\n return masked_t.sum(dim = dim) / denom.clamp(min = 1e-5)\n\ndef resize_image_to(\n image,\n target_image_size,\n clamp_range = None,\n mode = 'nearest'\n):\n orig_image_size = image.shape[-1]\n\n if orig_image_size == target_image_size:\n return image\n\n out = F.interpolate(image, target_image_size, mode = mode)\n\n if exists(clamp_range):\n out = out.clamp(*clamp_range)\n\n return out\n\ndef calc_all_frame_dims(\n downsample_factors: List[int],\n frames\n):\n if not exists(frames):\n return (tuple(),) * len(downsample_factors)\n\n all_frame_dims = []\n\n for divisor in downsample_factors:\n assert divisible_by(frames, divisor)\n all_frame_dims.append((frames // divisor,))\n\n return all_frame_dims\n\ndef safe_get_tuple_index(tup, index, default = None):\n if len(tup) <= index:\n return default\n return tup[index]\n\n# image normalization functions\n# ddpms expect images to be in the range of -1 to 1\n\ndef normalize_neg_one_to_one(img):\n return img * 2 - 1\n\ndef unnormalize_zero_to_one(normed_img):\n return (normed_img + 1) * 0.5\n\n# classifier free guidance functions\n\ndef prob_mask_like(shape, prob, device):\n if prob == 1:\n return torch.ones(shape, device = device, dtype = torch.bool)\n elif prob == 0:\n return torch.zeros(shape, device = device, dtype = torch.bool)\n else:\n return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob\n\n# gaussian diffusion with continuous time helper functions and classes\n# large part of this was thanks to @crowsonkb at https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/utils.py\n\n@torch.jit.script\ndef beta_linear_log_snr(t):\n return -torch.log(expm1(1e-4 + 10 * (t ** 2)))\n\n@torch.jit.script\ndef alpha_cosine_log_snr(t, s: float = 0.008):\n return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5) # not sure if this accounts for beta being clipped to 0.999 in discrete version\n\ndef log_snr_to_alpha_sigma(log_snr):\n return torch.sqrt(torch.sigmoid(log_snr)), torch.sqrt(torch.sigmoid(-log_snr))\n\nclass GaussianDiffusionContinuousTimes(nn.Module):\n def __init__(self, *, noise_schedule, timesteps = 1000):\n super().__init__()\n\n if noise_schedule == \"linear\":\n self.log_snr = beta_linear_log_snr\n elif noise_schedule == 
\"cosine\":\n self.log_snr = alpha_cosine_log_snr\n else:\n raise ValueError(f'invalid noise schedule {noise_schedule}')\n\n self.num_timesteps = timesteps\n\n def get_times(self, batch_size, noise_level, *, device):\n return torch.full((batch_size,), noise_level, device = device, dtype = torch.float32)\n\n def sample_random_times(self, batch_size, *, device):\n return torch.zeros((batch_size,), device = device).float().uniform_(0, 1)\n\n def get_condition(self, times):\n return maybe(self.log_snr)(times)\n\n def get_sampling_timesteps(self, batch, *, device):\n times = torch.linspace(1., 0., self.num_timesteps + 1, device = device)\n times = repeat(times, 't -> b t', b = batch)\n times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)\n times = times.unbind(dim = -1)\n return times\n\n def q_posterior(self, x_start, x_t, t, *, t_next = None):\n t_next = default(t_next, lambda: (t - 1. / self.num_timesteps).clamp(min = 0.))\n\n \"\"\" https://openreview.net/attachment?id=2LdBqxc1Yv&name=supplementary_material \"\"\"\n log_snr = self.log_snr(t)\n log_snr_next = self.log_snr(t_next)\n log_snr, log_snr_next = map(partial(right_pad_dims_to, x_t), (log_snr, log_snr_next))\n\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)\n\n # c - as defined near eq 33\n c = -expm1(log_snr - log_snr_next)\n posterior_mean = alpha_next * (x_t * (1 - c) / alpha + c * x_start)\n\n # following (eq. 33)\n posterior_variance = (sigma_next ** 2) * c\n posterior_log_variance_clipped = log(posterior_variance, eps = 1e-20)\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def q_sample(self, x_start, t, noise = None):\n dtype = x_start.dtype\n\n if isinstance(t, float):\n batch = x_start.shape[0]\n t = torch.full((batch,), t, device = x_start.device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_start))\n log_snr = self.log_snr(t).type(dtype)\n log_snr_padded_dim = right_pad_dims_to(x_start, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n return alpha * x_start + sigma * noise, log_snr, alpha, sigma\n\n def q_sample_from_to(self, x_from, from_t, to_t, noise = None):\n shape, device, dtype = x_from.shape, x_from.device, x_from.dtype\n batch = shape[0]\n\n if isinstance(from_t, float):\n from_t = torch.full((batch,), from_t, device = device, dtype = dtype)\n\n if isinstance(to_t, float):\n to_t = torch.full((batch,), to_t, device = device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_from))\n\n log_snr = self.log_snr(from_t)\n log_snr_padded_dim = right_pad_dims_to(x_from, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n log_snr_to = self.log_snr(to_t)\n log_snr_padded_dim_to = right_pad_dims_to(x_from, log_snr_to)\n alpha_to, sigma_to = log_snr_to_alpha_sigma(log_snr_padded_dim_to)\n\n return x_from * (alpha_to / alpha) + noise * (sigma_to * alpha - sigma * alpha_to) / alpha\n\n def predict_start_from_v(self, x_t, t, v):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return alpha * x_t - sigma * v\n\n def predict_start_from_noise(self, x_t, t, noise):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)\n\n# norms and residuals\n\nclass LayerNorm(nn.Module):\n def __init__(self, feats, stable = False, dim = -1):", "metadata": 
{"task_id": "lucidrains--imagen-pytorch/16", "ground_truth": " super().__init__()\n self.stable = stable\n self.dim = dim\n\n self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 326, "function_name": "__init__"}, "groundtruth": " super().__init__()\n self.stable = stable\n self.dim = dim\n\n self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):\n called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n\nprint_once = once(print)\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = None):\n if isinstance(val, list):\n val = tuple(val)\n\n output = val if isinstance(val, tuple) else ((val,) * default(length, 1))\n\n if exists(length):\n assert len(output) == length\n\n return output\n\ndef compact(input_dict):\n return {key: value for key, value in input_dict.items() if exists(value)}\n\ndef maybe_transform_dict_key(input_dict, key, fn):\n if key not in input_dict:\n return input_dict\n\n copied_dict = input_dict.copy()\n copied_dict[key] = fn(copied_dict[key])\n return copied_dict\n\ndef cast_uint8_images_to_float(images):\n if not images.dtype == torch.uint8:\n return images\n return images / 255\n\ndef module_device(module):\n return next(module.parameters()).device\n\ndef zero_init_(m):\n nn.init.zeros_(m.weight)\n if exists(m.bias):\n nn.init.zeros_(m.bias)\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef pad_tuple_to_length(t, length, fillvalue = None):\n remain_length = length - len(t)\n if remain_length <= 0:\n return t\n return (*t, *((fillvalue,) * remain_length))\n\n# helper classes\n\nclass Identity(nn.Module):\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n def forward(self, x, *args, **kwargs):\n return x\n\n# tensor helpers\n\ndef log(t, eps: float = 1e-12):\n return torch.log(t.clamp(min = eps))\n\ndef l2norm(t):\n return 
F.normalize(t, dim = -1)\n\ndef right_pad_dims_to(x, t):\n padding_dims = x.ndim - t.ndim\n if padding_dims <= 0:\n return t\n return t.view(*t.shape, *((1,) * padding_dims))\n\ndef masked_mean(t, *, dim, mask = None):\n if not exists(mask):\n return t.mean(dim = dim)\n\n denom = mask.sum(dim = dim, keepdim = True)\n mask = rearrange(mask, 'b n -> b n 1')\n masked_t = t.masked_fill(~mask, 0.)\n\n return masked_t.sum(dim = dim) / denom.clamp(min = 1e-5)\n\ndef resize_image_to(\n image,\n target_image_size,\n clamp_range = None,\n mode = 'nearest'\n):\n orig_image_size = image.shape[-1]\n\n if orig_image_size == target_image_size:\n return image\n\n out = F.interpolate(image, target_image_size, mode = mode)\n\n if exists(clamp_range):\n out = out.clamp(*clamp_range)\n\n return out\n\ndef calc_all_frame_dims(\n downsample_factors: List[int],\n frames\n):\n if not exists(frames):\n return (tuple(),) * len(downsample_factors)\n\n all_frame_dims = []\n\n for divisor in downsample_factors:\n assert divisible_by(frames, divisor)\n all_frame_dims.append((frames // divisor,))\n\n return all_frame_dims\n\ndef safe_get_tuple_index(tup, index, default = None):\n if len(tup) <= index:\n return default\n return tup[index]\n\n# image normalization functions\n# ddpms expect images to be in the range of -1 to 1\n\ndef normalize_neg_one_to_one(img):\n return img * 2 - 1\n\ndef unnormalize_zero_to_one(normed_img):\n return (normed_img + 1) * 0.5\n\n# classifier free guidance functions\n\ndef prob_mask_like(shape, prob, device):\n if prob == 1:\n return torch.ones(shape, device = device, dtype = torch.bool)\n elif prob == 0:\n return torch.zeros(shape, device = device, dtype = torch.bool)\n else:\n return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob\n\n# gaussian diffusion with continuous time helper functions and classes\n# large part of this was thanks to @crowsonkb at https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/utils.py\n\n@torch.jit.script\ndef beta_linear_log_snr(t):\n return -torch.log(expm1(1e-4 + 10 * (t ** 2)))\n\n@torch.jit.script\ndef alpha_cosine_log_snr(t, s: float = 0.008):\n return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5) # not sure if this accounts for beta being clipped to 0.999 in discrete version\n\ndef log_snr_to_alpha_sigma(log_snr):\n return torch.sqrt(torch.sigmoid(log_snr)), torch.sqrt(torch.sigmoid(-log_snr))\n\nclass GaussianDiffusionContinuousTimes(nn.Module):\n def __init__(self, *, noise_schedule, timesteps = 1000):\n super().__init__()\n\n if noise_schedule == \"linear\":\n self.log_snr = beta_linear_log_snr\n elif noise_schedule == \"cosine\":\n self.log_snr = alpha_cosine_log_snr\n else:\n raise ValueError(f'invalid noise schedule {noise_schedule}')\n\n self.num_timesteps = timesteps\n\n def get_times(self, batch_size, noise_level, *, device):\n return torch.full((batch_size,), noise_level, device = device, dtype = torch.float32)\n\n def sample_random_times(self, batch_size, *, device):\n return torch.zeros((batch_size,), device = device).float().uniform_(0, 1)\n\n def get_condition(self, times):\n return maybe(self.log_snr)(times)\n\n def get_sampling_timesteps(self, batch, *, device):\n times = torch.linspace(1., 0., self.num_timesteps + 1, device = device)\n times = repeat(times, 't -> b t', b = batch)\n times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)\n times = times.unbind(dim = -1)\n return times\n\n def q_posterior(self, x_start, x_t, t, *, t_next = None):\n t_next = default(t_next, 
lambda: (t - 1. / self.num_timesteps).clamp(min = 0.))\n\n \"\"\" https://openreview.net/attachment?id=2LdBqxc1Yv&name=supplementary_material \"\"\"\n log_snr = self.log_snr(t)\n log_snr_next = self.log_snr(t_next)\n log_snr, log_snr_next = map(partial(right_pad_dims_to, x_t), (log_snr, log_snr_next))\n\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)\n\n # c - as defined near eq 33\n c = -expm1(log_snr - log_snr_next)\n posterior_mean = alpha_next * (x_t * (1 - c) / alpha + c * x_start)\n\n # following (eq. 33)\n posterior_variance = (sigma_next ** 2) * c\n posterior_log_variance_clipped = log(posterior_variance, eps = 1e-20)\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def q_sample(self, x_start, t, noise = None):\n dtype = x_start.dtype\n\n if isinstance(t, float):\n batch = x_start.shape[0]\n t = torch.full((batch,), t, device = x_start.device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_start))\n log_snr = self.log_snr(t).type(dtype)\n log_snr_padded_dim = right_pad_dims_to(x_start, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n return alpha * x_start + sigma * noise, log_snr, alpha, sigma\n\n def q_sample_from_to(self, x_from, from_t, to_t, noise = None):\n shape, device, dtype = x_from.shape, x_from.device, x_from.dtype\n batch = shape[0]\n\n if isinstance(from_t, float):\n from_t = torch.full((batch,), from_t, device = device, dtype = dtype)\n\n if isinstance(to_t, float):\n to_t = torch.full((batch,), to_t, device = device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_from))\n\n log_snr = self.log_snr(from_t)\n log_snr_padded_dim = right_pad_dims_to(x_from, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n log_snr_to = self.log_snr(to_t)\n log_snr_padded_dim_to = right_pad_dims_to(x_from, log_snr_to)\n alpha_to, sigma_to = log_snr_to_alpha_sigma(log_snr_padded_dim_to)\n\n return x_from * (alpha_to / alpha) + noise * (sigma_to * alpha - sigma * alpha_to) / alpha\n\n def predict_start_from_v(self, x_t, t, v):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return alpha * x_t - sigma * v\n\n def predict_start_from_noise(self, x_t, t, noise):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)\n\n# norms and residuals\n\nclass LayerNorm(nn.Module):\n def __init__(self, feats, stable = False, dim = -1):\n super().__init__()\n self.stable = stable\n self.dim = dim\n\n self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))\n\n def forward(self, x):", "metadata": {"task_id": "lucidrains--imagen-pytorch/17", "ground_truth": " dtype, dim = x.dtype, self.dim\n\n if self.stable:\n x = x / x.amax(dim = dim, keepdim = True).detach()\n\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n var = torch.var(x, dim = dim, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = dim, keepdim = True)\n\n return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 333, "function_name": "forward"}, "groundtruth": " dtype, dim = x.dtype, self.dim\n\n if self.stable:\n x = x / x.amax(dim = dim, keepdim = True).detach()\n\n eps = 1e-5 if x.dtype == torch.float32 else 
1e-3\n var = torch.var(x, dim = dim, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = dim, keepdim = True)\n\n return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)\n"} +{"prompt": "import math\nimport copy\nfrom random import random\nfrom beartype.typing import List, Union\nfrom beartype import beartype\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.nn.parallel import DistributedDataParallel\nfrom torch import nn, einsum\nfrom torch.cuda.amp import autocast\nfrom torch.special import expm1\nimport torchvision.transforms as T\n\nimport kornia.augmentation as K\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\nfrom imagen_pytorch.imagen_video import Unet3D, resize_video_to, scale_video_time\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):\n called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n\nprint_once = once(print)\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = None):\n if isinstance(val, list):\n val = tuple(val)\n\n output = val if isinstance(val, tuple) else ((val,) * default(length, 1))\n\n if exists(length):\n assert len(output) == length\n\n return output\n\ndef compact(input_dict):\n return {key: value for key, value in input_dict.items() if exists(value)}\n\ndef maybe_transform_dict_key(input_dict, key, fn):\n if key not in input_dict:\n return input_dict\n\n copied_dict = input_dict.copy()\n copied_dict[key] = fn(copied_dict[key])\n return copied_dict\n\ndef cast_uint8_images_to_float(images):\n if not images.dtype == torch.uint8:\n return images\n return images / 255\n\ndef module_device(module):\n return next(module.parameters()).device\n\ndef zero_init_(m):\n nn.init.zeros_(m.weight)\n if exists(m.bias):\n nn.init.zeros_(m.bias)\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef pad_tuple_to_length(t, length, fillvalue = None):\n remain_length = length - len(t)\n if remain_length <= 0:\n return t\n return (*t, *((fillvalue,) * remain_length))\n\n# helper classes\n\nclass Identity(nn.Module):\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n def forward(self, x, *args, **kwargs):\n return x\n\n# tensor helpers\n\ndef log(t, eps: float = 1e-12):\n return torch.log(t.clamp(min = eps))\n\ndef l2norm(t):\n return F.normalize(t, dim = -1)\n\ndef right_pad_dims_to(x, t):\n padding_dims = x.ndim - t.ndim\n if padding_dims <= 0:\n return t\n return t.view(*t.shape, *((1,) * padding_dims))\n\ndef masked_mean(t, *, dim, mask = None):\n if not exists(mask):\n return t.mean(dim = dim)\n\n denom = mask.sum(dim = 
dim, keepdim = True)\n mask = rearrange(mask, 'b n -> b n 1')\n masked_t = t.masked_fill(~mask, 0.)\n\n return masked_t.sum(dim = dim) / denom.clamp(min = 1e-5)\n\ndef resize_image_to(\n image,\n target_image_size,\n clamp_range = None,\n mode = 'nearest'\n):\n orig_image_size = image.shape[-1]\n\n if orig_image_size == target_image_size:\n return image\n\n out = F.interpolate(image, target_image_size, mode = mode)\n\n if exists(clamp_range):\n out = out.clamp(*clamp_range)\n\n return out\n\ndef calc_all_frame_dims(\n downsample_factors: List[int],\n frames\n):\n if not exists(frames):\n return (tuple(),) * len(downsample_factors)\n\n all_frame_dims = []\n\n for divisor in downsample_factors:\n assert divisible_by(frames, divisor)\n all_frame_dims.append((frames // divisor,))\n\n return all_frame_dims\n\ndef safe_get_tuple_index(tup, index, default = None):\n if len(tup) <= index:\n return default\n return tup[index]\n\n# image normalization functions\n# ddpms expect images to be in the range of -1 to 1\n\ndef normalize_neg_one_to_one(img):\n return img * 2 - 1\n\ndef unnormalize_zero_to_one(normed_img):\n return (normed_img + 1) * 0.5\n\n# classifier free guidance functions\n\ndef prob_mask_like(shape, prob, device):\n if prob == 1:\n return torch.ones(shape, device = device, dtype = torch.bool)\n elif prob == 0:\n return torch.zeros(shape, device = device, dtype = torch.bool)\n else:\n return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob\n\n# gaussian diffusion with continuous time helper functions and classes\n# large part of this was thanks to @crowsonkb at https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/utils.py\n\n@torch.jit.script\ndef beta_linear_log_snr(t):\n return -torch.log(expm1(1e-4 + 10 * (t ** 2)))\n\n@torch.jit.script\ndef alpha_cosine_log_snr(t, s: float = 0.008):\n return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5) # not sure if this accounts for beta being clipped to 0.999 in discrete version\n\ndef log_snr_to_alpha_sigma(log_snr):\n return torch.sqrt(torch.sigmoid(log_snr)), torch.sqrt(torch.sigmoid(-log_snr))\n\nclass GaussianDiffusionContinuousTimes(nn.Module):\n def __init__(self, *, noise_schedule, timesteps = 1000):\n super().__init__()\n\n if noise_schedule == \"linear\":\n self.log_snr = beta_linear_log_snr\n elif noise_schedule == \"cosine\":\n self.log_snr = alpha_cosine_log_snr\n else:\n raise ValueError(f'invalid noise schedule {noise_schedule}')\n\n self.num_timesteps = timesteps\n\n def get_times(self, batch_size, noise_level, *, device):\n return torch.full((batch_size,), noise_level, device = device, dtype = torch.float32)\n\n def sample_random_times(self, batch_size, *, device):\n return torch.zeros((batch_size,), device = device).float().uniform_(0, 1)\n\n def get_condition(self, times):\n return maybe(self.log_snr)(times)\n\n def get_sampling_timesteps(self, batch, *, device):\n times = torch.linspace(1., 0., self.num_timesteps + 1, device = device)\n times = repeat(times, 't -> b t', b = batch)\n times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)\n times = times.unbind(dim = -1)\n return times\n\n def q_posterior(self, x_start, x_t, t, *, t_next = None):\n t_next = default(t_next, lambda: (t - 1. 
/ self.num_timesteps).clamp(min = 0.))\n\n \"\"\" https://openreview.net/attachment?id=2LdBqxc1Yv&name=supplementary_material \"\"\"\n log_snr = self.log_snr(t)\n log_snr_next = self.log_snr(t_next)\n log_snr, log_snr_next = map(partial(right_pad_dims_to, x_t), (log_snr, log_snr_next))\n\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)\n\n # c - as defined near eq 33\n c = -expm1(log_snr - log_snr_next)\n posterior_mean = alpha_next * (x_t * (1 - c) / alpha + c * x_start)\n\n # following (eq. 33)\n posterior_variance = (sigma_next ** 2) * c\n posterior_log_variance_clipped = log(posterior_variance, eps = 1e-20)\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def q_sample(self, x_start, t, noise = None):\n dtype = x_start.dtype\n\n if isinstance(t, float):\n batch = x_start.shape[0]\n t = torch.full((batch,), t, device = x_start.device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_start))\n log_snr = self.log_snr(t).type(dtype)\n log_snr_padded_dim = right_pad_dims_to(x_start, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n return alpha * x_start + sigma * noise, log_snr, alpha, sigma\n\n def q_sample_from_to(self, x_from, from_t, to_t, noise = None):\n shape, device, dtype = x_from.shape, x_from.device, x_from.dtype\n batch = shape[0]\n\n if isinstance(from_t, float):\n from_t = torch.full((batch,), from_t, device = device, dtype = dtype)\n\n if isinstance(to_t, float):\n to_t = torch.full((batch,), to_t, device = device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_from))\n\n log_snr = self.log_snr(from_t)\n log_snr_padded_dim = right_pad_dims_to(x_from, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n log_snr_to = self.log_snr(to_t)\n log_snr_padded_dim_to = right_pad_dims_to(x_from, log_snr_to)\n alpha_to, sigma_to = log_snr_to_alpha_sigma(log_snr_padded_dim_to)\n\n return x_from * (alpha_to / alpha) + noise * (sigma_to * alpha - sigma * alpha_to) / alpha\n\n def predict_start_from_v(self, x_t, t, v):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return alpha * x_t - sigma * v\n\n def predict_start_from_noise(self, x_t, t, noise):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)\n\n# norms and residuals\n\nclass LayerNorm(nn.Module):\n def __init__(self, feats, stable = False, dim = -1):\n super().__init__()\n self.stable = stable\n self.dim = dim\n\n self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))\n\n def forward(self, x):\n dtype, dim = x.dtype, self.dim\n\n if self.stable:\n x = x / x.amax(dim = dim, keepdim = True).detach()\n\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n var = torch.var(x, dim = dim, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = dim, keepdim = True)\n\n return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)\n\nChanLayerNorm = partial(LayerNorm, dim = -3)\n\nclass Always():\n def __init__(self, val):\n self.val = val\n\n def __call__(self, *args, **kwargs):\n return self.val\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\nclass Parallel(nn.Module):\n def __init__(self, *fns):\n super().__init__()\n 
self.fns = nn.ModuleList(fns)\n\n def forward(self, x):\n outputs = [fn(x) for fn in self.fns]\n return sum(outputs)\n\n# attention pooling\n\nclass PerceiverAttention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_head = 64,\n heads = 8,\n scale = 8\n ):", "metadata": {"task_id": "lucidrains--imagen-pytorch/18", "ground_truth": " super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = nn.LayerNorm(dim)\n self.norm_latents = nn.LayerNorm(dim)\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n nn.LayerNorm(dim)\n )\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 0, "lineno": 381, "function_name": "__init__"}, "groundtruth": " super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = nn.LayerNorm(dim)\n self.norm_latents = nn.LayerNorm(dim)\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n nn.LayerNorm(dim)\n )\n"} +{"prompt": " nn.init.zeros_(m.weight)\n if exists(m.bias):\n nn.init.zeros_(m.bias)\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef pad_tuple_to_length(t, length, fillvalue = None):\n remain_length = length - len(t)\n if remain_length <= 0:\n return t\n return (*t, *((fillvalue,) * remain_length))\n\n# helper classes\n\nclass Identity(nn.Module):\n def __init__(self, *args, **kwargs):\n super().__init__()\n\n def forward(self, x, *args, **kwargs):\n return x\n\n# tensor helpers\n\ndef log(t, eps: float = 1e-12):\n return torch.log(t.clamp(min = eps))\n\ndef l2norm(t):\n return F.normalize(t, dim = -1)\n\ndef right_pad_dims_to(x, t):\n padding_dims = x.ndim - t.ndim\n if padding_dims <= 0:\n return t\n return t.view(*t.shape, *((1,) * padding_dims))\n\ndef masked_mean(t, *, dim, mask = None):\n if not exists(mask):\n return t.mean(dim = dim)\n\n denom = mask.sum(dim = dim, keepdim = True)\n mask = rearrange(mask, 'b n -> b n 1')\n masked_t = t.masked_fill(~mask, 0.)\n\n return masked_t.sum(dim = dim) / denom.clamp(min = 1e-5)\n\ndef resize_image_to(\n image,\n target_image_size,\n clamp_range = None,\n mode = 'nearest'\n):\n orig_image_size = image.shape[-1]\n\n if orig_image_size == target_image_size:\n return image\n\n out = F.interpolate(image, target_image_size, mode = mode)\n\n if exists(clamp_range):\n out = out.clamp(*clamp_range)\n\n return out\n\ndef calc_all_frame_dims(\n downsample_factors: List[int],\n frames\n):\n if not exists(frames):\n return (tuple(),) * len(downsample_factors)\n\n all_frame_dims = []\n\n for divisor in downsample_factors:\n assert divisible_by(frames, divisor)\n all_frame_dims.append((frames // divisor,))\n\n return all_frame_dims\n\ndef safe_get_tuple_index(tup, index, default = None):\n if len(tup) <= index:\n return default\n return tup[index]\n\n# image normalization functions\n# ddpms expect images to be in the range of 
-1 to 1\n\ndef normalize_neg_one_to_one(img):\n return img * 2 - 1\n\ndef unnormalize_zero_to_one(normed_img):\n return (normed_img + 1) * 0.5\n\n# classifier free guidance functions\n\ndef prob_mask_like(shape, prob, device):\n if prob == 1:\n return torch.ones(shape, device = device, dtype = torch.bool)\n elif prob == 0:\n return torch.zeros(shape, device = device, dtype = torch.bool)\n else:\n return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob\n\n# gaussian diffusion with continuous time helper functions and classes\n# large part of this was thanks to @crowsonkb at https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/utils.py\n\n@torch.jit.script\ndef beta_linear_log_snr(t):\n return -torch.log(expm1(1e-4 + 10 * (t ** 2)))\n\n@torch.jit.script\ndef alpha_cosine_log_snr(t, s: float = 0.008):\n return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5) # not sure if this accounts for beta being clipped to 0.999 in discrete version\n\ndef log_snr_to_alpha_sigma(log_snr):\n return torch.sqrt(torch.sigmoid(log_snr)), torch.sqrt(torch.sigmoid(-log_snr))\n\nclass GaussianDiffusionContinuousTimes(nn.Module):\n def __init__(self, *, noise_schedule, timesteps = 1000):\n super().__init__()\n\n if noise_schedule == \"linear\":\n self.log_snr = beta_linear_log_snr\n elif noise_schedule == \"cosine\":\n self.log_snr = alpha_cosine_log_snr\n else:\n raise ValueError(f'invalid noise schedule {noise_schedule}')\n\n self.num_timesteps = timesteps\n\n def get_times(self, batch_size, noise_level, *, device):\n return torch.full((batch_size,), noise_level, device = device, dtype = torch.float32)\n\n def sample_random_times(self, batch_size, *, device):\n return torch.zeros((batch_size,), device = device).float().uniform_(0, 1)\n\n def get_condition(self, times):\n return maybe(self.log_snr)(times)\n\n def get_sampling_timesteps(self, batch, *, device):\n times = torch.linspace(1., 0., self.num_timesteps + 1, device = device)\n times = repeat(times, 't -> b t', b = batch)\n times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)\n times = times.unbind(dim = -1)\n return times\n\n def q_posterior(self, x_start, x_t, t, *, t_next = None):\n t_next = default(t_next, lambda: (t - 1. / self.num_timesteps).clamp(min = 0.))\n\n \"\"\" https://openreview.net/attachment?id=2LdBqxc1Yv&name=supplementary_material \"\"\"\n log_snr = self.log_snr(t)\n log_snr_next = self.log_snr(t_next)\n log_snr, log_snr_next = map(partial(right_pad_dims_to, x_t), (log_snr, log_snr_next))\n\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)\n\n # c - as defined near eq 33\n c = -expm1(log_snr - log_snr_next)\n posterior_mean = alpha_next * (x_t * (1 - c) / alpha + c * x_start)\n\n # following (eq. 
33)\n posterior_variance = (sigma_next ** 2) * c\n posterior_log_variance_clipped = log(posterior_variance, eps = 1e-20)\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def q_sample(self, x_start, t, noise = None):\n dtype = x_start.dtype\n\n if isinstance(t, float):\n batch = x_start.shape[0]\n t = torch.full((batch,), t, device = x_start.device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_start))\n log_snr = self.log_snr(t).type(dtype)\n log_snr_padded_dim = right_pad_dims_to(x_start, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n return alpha * x_start + sigma * noise, log_snr, alpha, sigma\n\n def q_sample_from_to(self, x_from, from_t, to_t, noise = None):\n shape, device, dtype = x_from.shape, x_from.device, x_from.dtype\n batch = shape[0]\n\n if isinstance(from_t, float):\n from_t = torch.full((batch,), from_t, device = device, dtype = dtype)\n\n if isinstance(to_t, float):\n to_t = torch.full((batch,), to_t, device = device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_from))\n\n log_snr = self.log_snr(from_t)\n log_snr_padded_dim = right_pad_dims_to(x_from, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n log_snr_to = self.log_snr(to_t)\n log_snr_padded_dim_to = right_pad_dims_to(x_from, log_snr_to)\n alpha_to, sigma_to = log_snr_to_alpha_sigma(log_snr_padded_dim_to)\n\n return x_from * (alpha_to / alpha) + noise * (sigma_to * alpha - sigma * alpha_to) / alpha\n\n def predict_start_from_v(self, x_t, t, v):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return alpha * x_t - sigma * v\n\n def predict_start_from_noise(self, x_t, t, noise):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)\n\n# norms and residuals\n\nclass LayerNorm(nn.Module):\n def __init__(self, feats, stable = False, dim = -1):\n super().__init__()\n self.stable = stable\n self.dim = dim\n\n self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))\n\n def forward(self, x):\n dtype, dim = x.dtype, self.dim\n\n if self.stable:\n x = x / x.amax(dim = dim, keepdim = True).detach()\n\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n var = torch.var(x, dim = dim, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = dim, keepdim = True)\n\n return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)\n\nChanLayerNorm = partial(LayerNorm, dim = -3)\n\nclass Always():\n def __init__(self, val):\n self.val = val\n\n def __call__(self, *args, **kwargs):\n return self.val\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\nclass Parallel(nn.Module):\n def __init__(self, *fns):\n super().__init__()\n self.fns = nn.ModuleList(fns)\n\n def forward(self, x):\n outputs = [fn(x) for fn in self.fns]\n return sum(outputs)\n\n# attention pooling\n\nclass PerceiverAttention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_head = 64,\n heads = 8,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = nn.LayerNorm(dim)\n self.norm_latents = nn.LayerNorm(dim)\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n self.q_scale = 
nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n nn.LayerNorm(dim)\n )\n\n def forward(self, x, latents, mask = None):\n x = self.norm(x)\n latents = self.norm_latents(latents)\n\n b, h = x.shape[0], self.heads\n\n q = self.to_q(latents)\n\n # the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to\n kv_input = torch.cat((x, latents), dim = -2)\n k, v = self.to_kv(kv_input).chunk(2, dim = -1)\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities and masking\n\n sim = einsum('... i d, ... j d -> ... i j', q, k) * self.scale\n\n if exists(mask):\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = F.pad(mask, (0, latents.shape[-2]), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('... i j, ... j d -> ... i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)', h = h)\n return self.to_out(out)\n\nclass PerceiverResampler(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n dim_head = 64,\n heads = 8,\n num_latents = 64,\n num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence\n max_seq_len = 512,\n ff_mult = 4\n ):", "metadata": {"task_id": "lucidrains--imagen-pytorch/19", "ground_truth": " super().__init__()\n self.pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.latents = nn.Parameter(torch.randn(num_latents, dim))\n\n self.to_latents_from_mean_pooled_seq = None\n\n if num_latents_mean_pooled > 0:\n self.to_latents_from_mean_pooled_seq = nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, dim * num_latents_mean_pooled),\n Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)\n )\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 102, "lineno": 453, "function_name": "__init__"}, "groundtruth": " super().__init__()\n self.pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.latents = nn.Parameter(torch.randn(num_latents, dim))\n\n self.to_latents_from_mean_pooled_seq = None\n\n if num_latents_mean_pooled > 0:\n self.to_latents_from_mean_pooled_seq = nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, dim * num_latents_mean_pooled),\n Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)\n )\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n"} +{"prompt": "(min = eps))\n\ndef l2norm(t):\n return F.normalize(t, dim = -1)\n\ndef right_pad_dims_to(x, t):\n padding_dims = x.ndim - t.ndim\n if padding_dims <= 0:\n return t\n return t.view(*t.shape, *((1,) * padding_dims))\n\ndef masked_mean(t, *, dim, mask = None):\n if not exists(mask):\n return t.mean(dim = dim)\n\n denom = mask.sum(dim = dim, keepdim = True)\n mask = rearrange(mask, 'b n -> b n 1')\n masked_t = t.masked_fill(~mask, 0.)\n\n return masked_t.sum(dim = dim) / 
denom.clamp(min = 1e-5)\n\ndef resize_image_to(\n image,\n target_image_size,\n clamp_range = None,\n mode = 'nearest'\n):\n orig_image_size = image.shape[-1]\n\n if orig_image_size == target_image_size:\n return image\n\n out = F.interpolate(image, target_image_size, mode = mode)\n\n if exists(clamp_range):\n out = out.clamp(*clamp_range)\n\n return out\n\ndef calc_all_frame_dims(\n downsample_factors: List[int],\n frames\n):\n if not exists(frames):\n return (tuple(),) * len(downsample_factors)\n\n all_frame_dims = []\n\n for divisor in downsample_factors:\n assert divisible_by(frames, divisor)\n all_frame_dims.append((frames // divisor,))\n\n return all_frame_dims\n\ndef safe_get_tuple_index(tup, index, default = None):\n if len(tup) <= index:\n return default\n return tup[index]\n\n# image normalization functions\n# ddpms expect images to be in the range of -1 to 1\n\ndef normalize_neg_one_to_one(img):\n return img * 2 - 1\n\ndef unnormalize_zero_to_one(normed_img):\n return (normed_img + 1) * 0.5\n\n# classifier free guidance functions\n\ndef prob_mask_like(shape, prob, device):\n if prob == 1:\n return torch.ones(shape, device = device, dtype = torch.bool)\n elif prob == 0:\n return torch.zeros(shape, device = device, dtype = torch.bool)\n else:\n return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob\n\n# gaussian diffusion with continuous time helper functions and classes\n# large part of this was thanks to @crowsonkb at https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/utils.py\n\n@torch.jit.script\ndef beta_linear_log_snr(t):\n return -torch.log(expm1(1e-4 + 10 * (t ** 2)))\n\n@torch.jit.script\ndef alpha_cosine_log_snr(t, s: float = 0.008):\n return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5) # not sure if this accounts for beta being clipped to 0.999 in discrete version\n\ndef log_snr_to_alpha_sigma(log_snr):\n return torch.sqrt(torch.sigmoid(log_snr)), torch.sqrt(torch.sigmoid(-log_snr))\n\nclass GaussianDiffusionContinuousTimes(nn.Module):\n def __init__(self, *, noise_schedule, timesteps = 1000):\n super().__init__()\n\n if noise_schedule == \"linear\":\n self.log_snr = beta_linear_log_snr\n elif noise_schedule == \"cosine\":\n self.log_snr = alpha_cosine_log_snr\n else:\n raise ValueError(f'invalid noise schedule {noise_schedule}')\n\n self.num_timesteps = timesteps\n\n def get_times(self, batch_size, noise_level, *, device):\n return torch.full((batch_size,), noise_level, device = device, dtype = torch.float32)\n\n def sample_random_times(self, batch_size, *, device):\n return torch.zeros((batch_size,), device = device).float().uniform_(0, 1)\n\n def get_condition(self, times):\n return maybe(self.log_snr)(times)\n\n def get_sampling_timesteps(self, batch, *, device):\n times = torch.linspace(1., 0., self.num_timesteps + 1, device = device)\n times = repeat(times, 't -> b t', b = batch)\n times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)\n times = times.unbind(dim = -1)\n return times\n\n def q_posterior(self, x_start, x_t, t, *, t_next = None):\n t_next = default(t_next, lambda: (t - 1. 
/ self.num_timesteps).clamp(min = 0.))\n\n \"\"\" https://openreview.net/attachment?id=2LdBqxc1Yv&name=supplementary_material \"\"\"\n log_snr = self.log_snr(t)\n log_snr_next = self.log_snr(t_next)\n log_snr, log_snr_next = map(partial(right_pad_dims_to, x_t), (log_snr, log_snr_next))\n\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)\n\n # c - as defined near eq 33\n c = -expm1(log_snr - log_snr_next)\n posterior_mean = alpha_next * (x_t * (1 - c) / alpha + c * x_start)\n\n # following (eq. 33)\n posterior_variance = (sigma_next ** 2) * c\n posterior_log_variance_clipped = log(posterior_variance, eps = 1e-20)\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def q_sample(self, x_start, t, noise = None):\n dtype = x_start.dtype\n\n if isinstance(t, float):\n batch = x_start.shape[0]\n t = torch.full((batch,), t, device = x_start.device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_start))\n log_snr = self.log_snr(t).type(dtype)\n log_snr_padded_dim = right_pad_dims_to(x_start, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n return alpha * x_start + sigma * noise, log_snr, alpha, sigma\n\n def q_sample_from_to(self, x_from, from_t, to_t, noise = None):\n shape, device, dtype = x_from.shape, x_from.device, x_from.dtype\n batch = shape[0]\n\n if isinstance(from_t, float):\n from_t = torch.full((batch,), from_t, device = device, dtype = dtype)\n\n if isinstance(to_t, float):\n to_t = torch.full((batch,), to_t, device = device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_from))\n\n log_snr = self.log_snr(from_t)\n log_snr_padded_dim = right_pad_dims_to(x_from, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n log_snr_to = self.log_snr(to_t)\n log_snr_padded_dim_to = right_pad_dims_to(x_from, log_snr_to)\n alpha_to, sigma_to = log_snr_to_alpha_sigma(log_snr_padded_dim_to)\n\n return x_from * (alpha_to / alpha) + noise * (sigma_to * alpha - sigma * alpha_to) / alpha\n\n def predict_start_from_v(self, x_t, t, v):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return alpha * x_t - sigma * v\n\n def predict_start_from_noise(self, x_t, t, noise):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)\n\n# norms and residuals\n\nclass LayerNorm(nn.Module):\n def __init__(self, feats, stable = False, dim = -1):\n super().__init__()\n self.stable = stable\n self.dim = dim\n\n self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))\n\n def forward(self, x):\n dtype, dim = x.dtype, self.dim\n\n if self.stable:\n x = x / x.amax(dim = dim, keepdim = True).detach()\n\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n var = torch.var(x, dim = dim, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = dim, keepdim = True)\n\n return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)\n\nChanLayerNorm = partial(LayerNorm, dim = -3)\n\nclass Always():\n def __init__(self, val):\n self.val = val\n\n def __call__(self, *args, **kwargs):\n return self.val\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\nclass Parallel(nn.Module):\n def __init__(self, *fns):\n super().__init__()\n 
self.fns = nn.ModuleList(fns)\n\n def forward(self, x):\n outputs = [fn(x) for fn in self.fns]\n return sum(outputs)\n\n# attention pooling\n\nclass PerceiverAttention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_head = 64,\n heads = 8,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = nn.LayerNorm(dim)\n self.norm_latents = nn.LayerNorm(dim)\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n nn.LayerNorm(dim)\n )\n\n def forward(self, x, latents, mask = None):\n x = self.norm(x)\n latents = self.norm_latents(latents)\n\n b, h = x.shape[0], self.heads\n\n q = self.to_q(latents)\n\n # the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to\n kv_input = torch.cat((x, latents), dim = -2)\n k, v = self.to_kv(kv_input).chunk(2, dim = -1)\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities and masking\n\n sim = einsum('... i d, ... j d -> ... i j', q, k) * self.scale\n\n if exists(mask):\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = F.pad(mask, (0, latents.shape[-2]), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('... i j, ... j d -> ... i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)', h = h)\n return self.to_out(out)\n\nclass PerceiverResampler(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n dim_head = 64,\n heads = 8,\n num_latents = 64,\n num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence\n max_seq_len = 512,\n ff_mult = 4\n ):\n super().__init__()\n self.pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.latents = nn.Parameter(torch.randn(num_latents, dim))\n\n self.to_latents_from_mean_pooled_seq = None\n\n if num_latents_mean_pooled > 0:\n self.to_latents_from_mean_pooled_seq = nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, dim * num_latents_mean_pooled),\n Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)\n )\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, mask = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/20", "ground_truth": " n, device = x.shape[1], x.device\n pos_emb = self.pos_emb(torch.arange(n, device = device))\n\n x_with_pos = x + pos_emb\n\n latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])\n\n if exists(self.to_latents_from_mean_pooled_seq):\n meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))\n meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)\n latents = torch.cat((meanpooled_latents, latents), dim = -2)\n\n for attn, ff in self.layers:\n latents = attn(x_with_pos, latents, mask = mask) + latents\n latents = ff(latents) + latents\n\n return latents\n", "fpath_tuple": 
["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 133, "lineno": 475, "function_name": "forward"}, "groundtruth": " n, device = x.shape[1], x.device\n pos_emb = self.pos_emb(torch.arange(n, device = device))\n\n x_with_pos = x + pos_emb\n\n latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])\n\n if exists(self.to_latents_from_mean_pooled_seq):\n meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))\n meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)\n latents = torch.cat((meanpooled_latents, latents), dim = -2)\n\n for attn, ff in self.layers:\n latents = attn(x_with_pos, latents, mask = mask) + latents\n latents = ff(latents) + latents\n\n return latents\n"} +{"prompt": "):\n out = out.clamp(*clamp_range)\n\n return out\n\ndef calc_all_frame_dims(\n downsample_factors: List[int],\n frames\n):\n if not exists(frames):\n return (tuple(),) * len(downsample_factors)\n\n all_frame_dims = []\n\n for divisor in downsample_factors:\n assert divisible_by(frames, divisor)\n all_frame_dims.append((frames // divisor,))\n\n return all_frame_dims\n\ndef safe_get_tuple_index(tup, index, default = None):\n if len(tup) <= index:\n return default\n return tup[index]\n\n# image normalization functions\n# ddpms expect images to be in the range of -1 to 1\n\ndef normalize_neg_one_to_one(img):\n return img * 2 - 1\n\ndef unnormalize_zero_to_one(normed_img):\n return (normed_img + 1) * 0.5\n\n# classifier free guidance functions\n\ndef prob_mask_like(shape, prob, device):\n if prob == 1:\n return torch.ones(shape, device = device, dtype = torch.bool)\n elif prob == 0:\n return torch.zeros(shape, device = device, dtype = torch.bool)\n else:\n return torch.zeros(shape, device = device).float().uniform_(0, 1) < prob\n\n# gaussian diffusion with continuous time helper functions and classes\n# large part of this was thanks to @crowsonkb at https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/utils.py\n\n@torch.jit.script\ndef beta_linear_log_snr(t):\n return -torch.log(expm1(1e-4 + 10 * (t ** 2)))\n\n@torch.jit.script\ndef alpha_cosine_log_snr(t, s: float = 0.008):\n return -log((torch.cos((t + s) / (1 + s) * math.pi * 0.5) ** -2) - 1, eps = 1e-5) # not sure if this accounts for beta being clipped to 0.999 in discrete version\n\ndef log_snr_to_alpha_sigma(log_snr):\n return torch.sqrt(torch.sigmoid(log_snr)), torch.sqrt(torch.sigmoid(-log_snr))\n\nclass GaussianDiffusionContinuousTimes(nn.Module):\n def __init__(self, *, noise_schedule, timesteps = 1000):\n super().__init__()\n\n if noise_schedule == \"linear\":\n self.log_snr = beta_linear_log_snr\n elif noise_schedule == \"cosine\":\n self.log_snr = alpha_cosine_log_snr\n else:\n raise ValueError(f'invalid noise schedule {noise_schedule}')\n\n self.num_timesteps = timesteps\n\n def get_times(self, batch_size, noise_level, *, device):\n return torch.full((batch_size,), noise_level, device = device, dtype = torch.float32)\n\n def sample_random_times(self, batch_size, *, device):\n return torch.zeros((batch_size,), device = device).float().uniform_(0, 1)\n\n def get_condition(self, times):\n return maybe(self.log_snr)(times)\n\n def get_sampling_timesteps(self, batch, *, device):\n times = torch.linspace(1., 0., self.num_timesteps + 1, device = device)\n times = repeat(times, 't -> b t', b = batch)\n times = torch.stack((times[:, :-1], times[:, 1:]), dim = 0)\n times = times.unbind(dim = -1)\n return times\n\n def 
q_posterior(self, x_start, x_t, t, *, t_next = None):\n t_next = default(t_next, lambda: (t - 1. / self.num_timesteps).clamp(min = 0.))\n\n \"\"\" https://openreview.net/attachment?id=2LdBqxc1Yv&name=supplementary_material \"\"\"\n log_snr = self.log_snr(t)\n log_snr_next = self.log_snr(t_next)\n log_snr, log_snr_next = map(partial(right_pad_dims_to, x_t), (log_snr, log_snr_next))\n\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)\n\n # c - as defined near eq 33\n c = -expm1(log_snr - log_snr_next)\n posterior_mean = alpha_next * (x_t * (1 - c) / alpha + c * x_start)\n\n # following (eq. 33)\n posterior_variance = (sigma_next ** 2) * c\n posterior_log_variance_clipped = log(posterior_variance, eps = 1e-20)\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def q_sample(self, x_start, t, noise = None):\n dtype = x_start.dtype\n\n if isinstance(t, float):\n batch = x_start.shape[0]\n t = torch.full((batch,), t, device = x_start.device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_start))\n log_snr = self.log_snr(t).type(dtype)\n log_snr_padded_dim = right_pad_dims_to(x_start, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n return alpha * x_start + sigma * noise, log_snr, alpha, sigma\n\n def q_sample_from_to(self, x_from, from_t, to_t, noise = None):\n shape, device, dtype = x_from.shape, x_from.device, x_from.dtype\n batch = shape[0]\n\n if isinstance(from_t, float):\n from_t = torch.full((batch,), from_t, device = device, dtype = dtype)\n\n if isinstance(to_t, float):\n to_t = torch.full((batch,), to_t, device = device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_from))\n\n log_snr = self.log_snr(from_t)\n log_snr_padded_dim = right_pad_dims_to(x_from, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n log_snr_to = self.log_snr(to_t)\n log_snr_padded_dim_to = right_pad_dims_to(x_from, log_snr_to)\n alpha_to, sigma_to = log_snr_to_alpha_sigma(log_snr_padded_dim_to)\n\n return x_from * (alpha_to / alpha) + noise * (sigma_to * alpha - sigma * alpha_to) / alpha\n\n def predict_start_from_v(self, x_t, t, v):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return alpha * x_t - sigma * v\n\n def predict_start_from_noise(self, x_t, t, noise):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)\n\n# norms and residuals\n\nclass LayerNorm(nn.Module):\n def __init__(self, feats, stable = False, dim = -1):\n super().__init__()\n self.stable = stable\n self.dim = dim\n\n self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))\n\n def forward(self, x):\n dtype, dim = x.dtype, self.dim\n\n if self.stable:\n x = x / x.amax(dim = dim, keepdim = True).detach()\n\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n var = torch.var(x, dim = dim, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = dim, keepdim = True)\n\n return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)\n\nChanLayerNorm = partial(LayerNorm, dim = -3)\n\nclass Always():\n def __init__(self, val):\n self.val = val\n\n def __call__(self, *args, **kwargs):\n return self.val\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(x, 
**kwargs) + x\n\nclass Parallel(nn.Module):\n def __init__(self, *fns):\n super().__init__()\n self.fns = nn.ModuleList(fns)\n\n def forward(self, x):\n outputs = [fn(x) for fn in self.fns]\n return sum(outputs)\n\n# attention pooling\n\nclass PerceiverAttention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_head = 64,\n heads = 8,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = nn.LayerNorm(dim)\n self.norm_latents = nn.LayerNorm(dim)\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n nn.LayerNorm(dim)\n )\n\n def forward(self, x, latents, mask = None):\n x = self.norm(x)\n latents = self.norm_latents(latents)\n\n b, h = x.shape[0], self.heads\n\n q = self.to_q(latents)\n\n # the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to\n kv_input = torch.cat((x, latents), dim = -2)\n k, v = self.to_kv(kv_input).chunk(2, dim = -1)\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities and masking\n\n sim = einsum('... i d, ... j d -> ... i j', q, k) * self.scale\n\n if exists(mask):\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = F.pad(mask, (0, latents.shape[-2]), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('... i j, ... j d -> ... 
i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)', h = h)\n return self.to_out(out)\n\nclass PerceiverResampler(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n dim_head = 64,\n heads = 8,\n num_latents = 64,\n num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence\n max_seq_len = 512,\n ff_mult = 4\n ):\n super().__init__()\n self.pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.latents = nn.Parameter(torch.randn(num_latents, dim))\n\n self.to_latents_from_mean_pooled_seq = None\n\n if num_latents_mean_pooled > 0:\n self.to_latents_from_mean_pooled_seq = nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, dim * num_latents_mean_pooled),\n Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)\n )\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, mask = None):\n n, device = x.shape[1], x.device\n pos_emb = self.pos_emb(torch.arange(n, device = device))\n\n x_with_pos = x + pos_emb\n\n latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])\n\n if exists(self.to_latents_from_mean_pooled_seq):\n meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))\n meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)\n latents = torch.cat((meanpooled_latents, latents), dim = -2)\n\n for attn, ff in self.layers:\n latents = attn(x_with_pos, latents, mask = mask) + latents\n latents = ff(latents) + latents\n\n return latents\n\n# attention\n\nclass Attention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n dim_head = 64,\n heads = 8,\n context_dim = None,\n scale = 8\n ):", "metadata": {"task_id": "lucidrains--imagen-pytorch/21", "ground_truth": " super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = LayerNorm(dim)\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 167, "lineno": 505, "function_name": "__init__"}, "groundtruth": " super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = LayerNorm(dim)\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n"} +{"prompt": "num_timesteps).clamp(min = 0.))\n\n \"\"\" https://openreview.net/attachment?id=2LdBqxc1Yv&name=supplementary_material \"\"\"\n log_snr = self.log_snr(t)\n log_snr_next = 
self.log_snr(t_next)\n log_snr, log_snr_next = map(partial(right_pad_dims_to, x_t), (log_snr, log_snr_next))\n\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)\n\n # c - as defined near eq 33\n c = -expm1(log_snr - log_snr_next)\n posterior_mean = alpha_next * (x_t * (1 - c) / alpha + c * x_start)\n\n # following (eq. 33)\n posterior_variance = (sigma_next ** 2) * c\n posterior_log_variance_clipped = log(posterior_variance, eps = 1e-20)\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def q_sample(self, x_start, t, noise = None):\n dtype = x_start.dtype\n\n if isinstance(t, float):\n batch = x_start.shape[0]\n t = torch.full((batch,), t, device = x_start.device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_start))\n log_snr = self.log_snr(t).type(dtype)\n log_snr_padded_dim = right_pad_dims_to(x_start, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n return alpha * x_start + sigma * noise, log_snr, alpha, sigma\n\n def q_sample_from_to(self, x_from, from_t, to_t, noise = None):\n shape, device, dtype = x_from.shape, x_from.device, x_from.dtype\n batch = shape[0]\n\n if isinstance(from_t, float):\n from_t = torch.full((batch,), from_t, device = device, dtype = dtype)\n\n if isinstance(to_t, float):\n to_t = torch.full((batch,), to_t, device = device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_from))\n\n log_snr = self.log_snr(from_t)\n log_snr_padded_dim = right_pad_dims_to(x_from, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n log_snr_to = self.log_snr(to_t)\n log_snr_padded_dim_to = right_pad_dims_to(x_from, log_snr_to)\n alpha_to, sigma_to = log_snr_to_alpha_sigma(log_snr_padded_dim_to)\n\n return x_from * (alpha_to / alpha) + noise * (sigma_to * alpha - sigma * alpha_to) / alpha\n\n def predict_start_from_v(self, x_t, t, v):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return alpha * x_t - sigma * v\n\n def predict_start_from_noise(self, x_t, t, noise):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)\n\n# norms and residuals\n\nclass LayerNorm(nn.Module):\n def __init__(self, feats, stable = False, dim = -1):\n super().__init__()\n self.stable = stable\n self.dim = dim\n\n self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))\n\n def forward(self, x):\n dtype, dim = x.dtype, self.dim\n\n if self.stable:\n x = x / x.amax(dim = dim, keepdim = True).detach()\n\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n var = torch.var(x, dim = dim, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = dim, keepdim = True)\n\n return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)\n\nChanLayerNorm = partial(LayerNorm, dim = -3)\n\nclass Always():\n def __init__(self, val):\n self.val = val\n\n def __call__(self, *args, **kwargs):\n return self.val\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\nclass Parallel(nn.Module):\n def __init__(self, *fns):\n super().__init__()\n self.fns = nn.ModuleList(fns)\n\n def forward(self, x):\n outputs = [fn(x) for fn in self.fns]\n return sum(outputs)\n\n# attention pooling\n\nclass PerceiverAttention(nn.Module):\n 
def __init__(\n self,\n *,\n dim,\n dim_head = 64,\n heads = 8,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = nn.LayerNorm(dim)\n self.norm_latents = nn.LayerNorm(dim)\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n nn.LayerNorm(dim)\n )\n\n def forward(self, x, latents, mask = None):\n x = self.norm(x)\n latents = self.norm_latents(latents)\n\n b, h = x.shape[0], self.heads\n\n q = self.to_q(latents)\n\n # the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to\n kv_input = torch.cat((x, latents), dim = -2)\n k, v = self.to_kv(kv_input).chunk(2, dim = -1)\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities and masking\n\n sim = einsum('... i d, ... j d -> ... i j', q, k) * self.scale\n\n if exists(mask):\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = F.pad(mask, (0, latents.shape[-2]), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('... i j, ... j d -> ... i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)', h = h)\n return self.to_out(out)\n\nclass PerceiverResampler(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n dim_head = 64,\n heads = 8,\n num_latents = 64,\n num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence\n max_seq_len = 512,\n ff_mult = 4\n ):\n super().__init__()\n self.pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.latents = nn.Parameter(torch.randn(num_latents, dim))\n\n self.to_latents_from_mean_pooled_seq = None\n\n if num_latents_mean_pooled > 0:\n self.to_latents_from_mean_pooled_seq = nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, dim * num_latents_mean_pooled),\n Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)\n )\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, mask = None):\n n, device = x.shape[1], x.device\n pos_emb = self.pos_emb(torch.arange(n, device = device))\n\n x_with_pos = x + pos_emb\n\n latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])\n\n if exists(self.to_latents_from_mean_pooled_seq):\n meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))\n meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)\n latents = torch.cat((meanpooled_latents, latents), dim = -2)\n\n for attn, ff in self.layers:\n latents = attn(x_with_pos, latents, mask = mask) + latents\n latents = ff(latents) + latents\n\n return latents\n\n# attention\n\nclass Attention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n dim_head = 64,\n heads = 8,\n context_dim = None,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = LayerNorm(dim)\n\n self.null_kv = 
nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context = None, mask = None, attn_bias = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n\n q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))\n\n q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b 1 d', b = b)\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # add text conditioning, if present\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n k = torch.cat((ck, k), dim = -2)\n v = torch.cat((cv, v), dim = -2)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # calculate query / key similarities\n\n sim = einsum('b h i d, b j d -> b h i j', q, k) * self.scale\n\n # relative positional encoding (T5 style)\n\n if exists(attn_bias):\n sim = sim + attn_bias\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n # aggregate values\n\n out = einsum('b h i j, b j d -> b h i d', attn, v)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\n# decoder\n\ndef Upsample(dim, dim_out = None):\n dim_out = default(dim_out, dim)\n\n return nn.Sequential(\n nn.Upsample(scale_factor = 2, mode = 'nearest'),\n nn.Conv2d(dim, dim_out, 3, padding = 1)\n )\n\nclass PixelShuffleUpsample(nn.Module):\n \"\"\"\n code shared by @MalumaDev at DALLE2-pytorch for addressing checkboard artifacts\n https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf\n \"\"\"\n def __init__(self, dim, dim_out = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/22", "ground_truth": " super().__init__()\n dim_out = default(dim_out, dim)\n conv = nn.Conv2d(dim, dim_out * 4, 1)\n\n self.net = nn.Sequential(\n conv,\n nn.SiLU(),\n nn.PixelShuffle(2)\n )\n\n self.init_conv_(conv)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 255, "lineno": 602, "function_name": "__init__"}, "groundtruth": " super().__init__()\n dim_out = default(dim_out, dim)\n conv = nn.Conv2d(dim, dim_out * 4, 1)\n\n self.net = nn.Sequential(\n conv,\n nn.SiLU(),\n nn.PixelShuffle(2)\n )\n\n self.init_conv_(conv)\n"} +{"prompt": "_dims_to, x_t), (log_snr, log_snr_next))\n\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n alpha_next, sigma_next = log_snr_to_alpha_sigma(log_snr_next)\n\n # c - as defined near eq 33\n c = -expm1(log_snr - log_snr_next)\n posterior_mean = alpha_next * (x_t * (1 - c) / alpha + c * x_start)\n\n # following (eq. 
33)\n posterior_variance = (sigma_next ** 2) * c\n posterior_log_variance_clipped = log(posterior_variance, eps = 1e-20)\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def q_sample(self, x_start, t, noise = None):\n dtype = x_start.dtype\n\n if isinstance(t, float):\n batch = x_start.shape[0]\n t = torch.full((batch,), t, device = x_start.device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_start))\n log_snr = self.log_snr(t).type(dtype)\n log_snr_padded_dim = right_pad_dims_to(x_start, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n return alpha * x_start + sigma * noise, log_snr, alpha, sigma\n\n def q_sample_from_to(self, x_from, from_t, to_t, noise = None):\n shape, device, dtype = x_from.shape, x_from.device, x_from.dtype\n batch = shape[0]\n\n if isinstance(from_t, float):\n from_t = torch.full((batch,), from_t, device = device, dtype = dtype)\n\n if isinstance(to_t, float):\n to_t = torch.full((batch,), to_t, device = device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_from))\n\n log_snr = self.log_snr(from_t)\n log_snr_padded_dim = right_pad_dims_to(x_from, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n log_snr_to = self.log_snr(to_t)\n log_snr_padded_dim_to = right_pad_dims_to(x_from, log_snr_to)\n alpha_to, sigma_to = log_snr_to_alpha_sigma(log_snr_padded_dim_to)\n\n return x_from * (alpha_to / alpha) + noise * (sigma_to * alpha - sigma * alpha_to) / alpha\n\n def predict_start_from_v(self, x_t, t, v):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return alpha * x_t - sigma * v\n\n def predict_start_from_noise(self, x_t, t, noise):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)\n\n# norms and residuals\n\nclass LayerNorm(nn.Module):\n def __init__(self, feats, stable = False, dim = -1):\n super().__init__()\n self.stable = stable\n self.dim = dim\n\n self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))\n\n def forward(self, x):\n dtype, dim = x.dtype, self.dim\n\n if self.stable:\n x = x / x.amax(dim = dim, keepdim = True).detach()\n\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n var = torch.var(x, dim = dim, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = dim, keepdim = True)\n\n return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)\n\nChanLayerNorm = partial(LayerNorm, dim = -3)\n\nclass Always():\n def __init__(self, val):\n self.val = val\n\n def __call__(self, *args, **kwargs):\n return self.val\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\nclass Parallel(nn.Module):\n def __init__(self, *fns):\n super().__init__()\n self.fns = nn.ModuleList(fns)\n\n def forward(self, x):\n outputs = [fn(x) for fn in self.fns]\n return sum(outputs)\n\n# attention pooling\n\nclass PerceiverAttention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_head = 64,\n heads = 8,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = nn.LayerNorm(dim)\n self.norm_latents = nn.LayerNorm(dim)\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n self.q_scale = 
nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n nn.LayerNorm(dim)\n )\n\n def forward(self, x, latents, mask = None):\n x = self.norm(x)\n latents = self.norm_latents(latents)\n\n b, h = x.shape[0], self.heads\n\n q = self.to_q(latents)\n\n # the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to\n kv_input = torch.cat((x, latents), dim = -2)\n k, v = self.to_kv(kv_input).chunk(2, dim = -1)\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities and masking\n\n sim = einsum('... i d, ... j d -> ... i j', q, k) * self.scale\n\n if exists(mask):\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = F.pad(mask, (0, latents.shape[-2]), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('... i j, ... j d -> ... i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)', h = h)\n return self.to_out(out)\n\nclass PerceiverResampler(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n dim_head = 64,\n heads = 8,\n num_latents = 64,\n num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence\n max_seq_len = 512,\n ff_mult = 4\n ):\n super().__init__()\n self.pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.latents = nn.Parameter(torch.randn(num_latents, dim))\n\n self.to_latents_from_mean_pooled_seq = None\n\n if num_latents_mean_pooled > 0:\n self.to_latents_from_mean_pooled_seq = nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, dim * num_latents_mean_pooled),\n Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)\n )\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, mask = None):\n n, device = x.shape[1], x.device\n pos_emb = self.pos_emb(torch.arange(n, device = device))\n\n x_with_pos = x + pos_emb\n\n latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])\n\n if exists(self.to_latents_from_mean_pooled_seq):\n meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))\n meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)\n latents = torch.cat((meanpooled_latents, latents), dim = -2)\n\n for attn, ff in self.layers:\n latents = attn(x_with_pos, latents, mask = mask) + latents\n latents = ff(latents) + latents\n\n return latents\n\n# attention\n\nclass Attention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n dim_head = 64,\n heads = 8,\n context_dim = None,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = LayerNorm(dim)\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if exists(context_dim) else 
None\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context = None, mask = None, attn_bias = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n\n q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))\n\n q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b 1 d', b = b)\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # add text conditioning, if present\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n k = torch.cat((ck, k), dim = -2)\n v = torch.cat((cv, v), dim = -2)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # calculate query / key similarities\n\n sim = einsum('b h i d, b j d -> b h i j', q, k) * self.scale\n\n # relative positional encoding (T5 style)\n\n if exists(attn_bias):\n sim = sim + attn_bias\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n # aggregate values\n\n out = einsum('b h i j, b j d -> b h i d', attn, v)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\n# decoder\n\ndef Upsample(dim, dim_out = None):\n dim_out = default(dim_out, dim)\n\n return nn.Sequential(\n nn.Upsample(scale_factor = 2, mode = 'nearest'),\n nn.Conv2d(dim, dim_out, 3, padding = 1)\n )\n\nclass PixelShuffleUpsample(nn.Module):\n \"\"\"\n code shared by @MalumaDev at DALLE2-pytorch for addressing checkboard artifacts\n https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf\n \"\"\"\n def __init__(self, dim, dim_out = None):\n super().__init__()\n dim_out = default(dim_out, dim)\n conv = nn.Conv2d(dim, dim_out * 4, 1)\n\n self.net = nn.Sequential(\n conv,\n nn.SiLU(),\n nn.PixelShuffle(2)\n )\n\n self.init_conv_(conv)\n\n def init_conv_(self, conv):", "metadata": {"task_id": "lucidrains--imagen-pytorch/23", "ground_truth": " o, i, h, w = conv.weight.shape\n conv_weight = torch.empty(o // 4, i, h, w)\n nn.init.kaiming_uniform_(conv_weight)\n conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')\n\n conv.weight.data.copy_(conv_weight)\n nn.init.zeros_(conv.bias.data)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 260, "lineno": 615, "function_name": "init_conv_"}, "groundtruth": " o, i, h, w = conv.weight.shape\n conv_weight = torch.empty(o // 4, i, h, w)\n nn.init.kaiming_uniform_(conv_weight)\n conv_weight = repeat(conv_weight, 'o ... 
-> (o 4) ...')\n\n conv.weight.data.copy_(conv_weight)\n nn.init.zeros_(conv.bias.data)\n"} +{"prompt": " eps = 1e-20)\n return posterior_mean, posterior_variance, posterior_log_variance_clipped\n\n def q_sample(self, x_start, t, noise = None):\n dtype = x_start.dtype\n\n if isinstance(t, float):\n batch = x_start.shape[0]\n t = torch.full((batch,), t, device = x_start.device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_start))\n log_snr = self.log_snr(t).type(dtype)\n log_snr_padded_dim = right_pad_dims_to(x_start, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n return alpha * x_start + sigma * noise, log_snr, alpha, sigma\n\n def q_sample_from_to(self, x_from, from_t, to_t, noise = None):\n shape, device, dtype = x_from.shape, x_from.device, x_from.dtype\n batch = shape[0]\n\n if isinstance(from_t, float):\n from_t = torch.full((batch,), from_t, device = device, dtype = dtype)\n\n if isinstance(to_t, float):\n to_t = torch.full((batch,), to_t, device = device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_from))\n\n log_snr = self.log_snr(from_t)\n log_snr_padded_dim = right_pad_dims_to(x_from, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n log_snr_to = self.log_snr(to_t)\n log_snr_padded_dim_to = right_pad_dims_to(x_from, log_snr_to)\n alpha_to, sigma_to = log_snr_to_alpha_sigma(log_snr_padded_dim_to)\n\n return x_from * (alpha_to / alpha) + noise * (sigma_to * alpha - sigma * alpha_to) / alpha\n\n def predict_start_from_v(self, x_t, t, v):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return alpha * x_t - sigma * v\n\n def predict_start_from_noise(self, x_t, t, noise):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)\n\n# norms and residuals\n\nclass LayerNorm(nn.Module):\n def __init__(self, feats, stable = False, dim = -1):\n super().__init__()\n self.stable = stable\n self.dim = dim\n\n self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))\n\n def forward(self, x):\n dtype, dim = x.dtype, self.dim\n\n if self.stable:\n x = x / x.amax(dim = dim, keepdim = True).detach()\n\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n var = torch.var(x, dim = dim, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = dim, keepdim = True)\n\n return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)\n\nChanLayerNorm = partial(LayerNorm, dim = -3)\n\nclass Always():\n def __init__(self, val):\n self.val = val\n\n def __call__(self, *args, **kwargs):\n return self.val\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\nclass Parallel(nn.Module):\n def __init__(self, *fns):\n super().__init__()\n self.fns = nn.ModuleList(fns)\n\n def forward(self, x):\n outputs = [fn(x) for fn in self.fns]\n return sum(outputs)\n\n# attention pooling\n\nclass PerceiverAttention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_head = 64,\n heads = 8,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = nn.LayerNorm(dim)\n self.norm_latents = nn.LayerNorm(dim)\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n self.q_scale = 
nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n nn.LayerNorm(dim)\n )\n\n def forward(self, x, latents, mask = None):\n x = self.norm(x)\n latents = self.norm_latents(latents)\n\n b, h = x.shape[0], self.heads\n\n q = self.to_q(latents)\n\n # the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to\n kv_input = torch.cat((x, latents), dim = -2)\n k, v = self.to_kv(kv_input).chunk(2, dim = -1)\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities and masking\n\n sim = einsum('... i d, ... j d -> ... i j', q, k) * self.scale\n\n if exists(mask):\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = F.pad(mask, (0, latents.shape[-2]), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('... i j, ... j d -> ... i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)', h = h)\n return self.to_out(out)\n\nclass PerceiverResampler(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n dim_head = 64,\n heads = 8,\n num_latents = 64,\n num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence\n max_seq_len = 512,\n ff_mult = 4\n ):\n super().__init__()\n self.pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.latents = nn.Parameter(torch.randn(num_latents, dim))\n\n self.to_latents_from_mean_pooled_seq = None\n\n if num_latents_mean_pooled > 0:\n self.to_latents_from_mean_pooled_seq = nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, dim * num_latents_mean_pooled),\n Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)\n )\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, mask = None):\n n, device = x.shape[1], x.device\n pos_emb = self.pos_emb(torch.arange(n, device = device))\n\n x_with_pos = x + pos_emb\n\n latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])\n\n if exists(self.to_latents_from_mean_pooled_seq):\n meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))\n meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)\n latents = torch.cat((meanpooled_latents, latents), dim = -2)\n\n for attn, ff in self.layers:\n latents = attn(x_with_pos, latents, mask = mask) + latents\n latents = ff(latents) + latents\n\n return latents\n\n# attention\n\nclass Attention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n dim_head = 64,\n heads = 8,\n context_dim = None,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = LayerNorm(dim)\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if exists(context_dim) else 
None\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context = None, mask = None, attn_bias = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n\n q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))\n\n q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b 1 d', b = b)\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # add text conditioning, if present\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n k = torch.cat((ck, k), dim = -2)\n v = torch.cat((cv, v), dim = -2)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # calculate query / key similarities\n\n sim = einsum('b h i d, b j d -> b h i j', q, k) * self.scale\n\n # relative positional encoding (T5 style)\n\n if exists(attn_bias):\n sim = sim + attn_bias\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n # aggregate values\n\n out = einsum('b h i j, b j d -> b h i d', attn, v)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\n# decoder\n\ndef Upsample(dim, dim_out = None):\n dim_out = default(dim_out, dim)\n\n return nn.Sequential(\n nn.Upsample(scale_factor = 2, mode = 'nearest'),\n nn.Conv2d(dim, dim_out, 3, padding = 1)\n )\n\nclass PixelShuffleUpsample(nn.Module):\n \"\"\"\n code shared by @MalumaDev at DALLE2-pytorch for addressing checkboard artifacts\n https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf\n \"\"\"\n def __init__(self, dim, dim_out = None):\n super().__init__()\n dim_out = default(dim_out, dim)\n conv = nn.Conv2d(dim, dim_out * 4, 1)\n\n self.net = nn.Sequential(\n conv,\n nn.SiLU(),\n nn.PixelShuffle(2)\n )\n\n self.init_conv_(conv)\n\n def init_conv_(self, conv):\n o, i, h, w = conv.weight.shape\n conv_weight = torch.empty(o // 4, i, h, w)\n nn.init.kaiming_uniform_(conv_weight)\n conv_weight = repeat(conv_weight, 'o ... 
-> (o 4) ...')\n\n conv.weight.data.copy_(conv_weight)\n nn.init.zeros_(conv.bias.data)\n\n def forward(self, x):\n return self.net(x)\n\ndef Downsample(dim, dim_out = None):\n # https://arxiv.org/abs/2208.03641 shows this is the most optimal way to downsample\n # named SP-conv in the paper, but basically a pixel unshuffle", "metadata": {"task_id": "lucidrains--imagen-pytorch/24", "ground_truth": " dim_out = default(dim_out, dim)\n return nn.Sequential(\n Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),\n nn.Conv2d(dim * 4, dim_out, 1)\n )\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 271, "lineno": 629, "function_name": "Downsample"}, "groundtruth": " dim_out = default(dim_out, dim)\n return nn.Sequential(\n Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),\n nn.Conv2d(dim * 4, dim_out, 1)\n )\n"} +{"prompt": "_t, device = device, dtype = dtype)\n\n if isinstance(to_t, float):\n to_t = torch.full((batch,), to_t, device = device, dtype = dtype)\n\n noise = default(noise, lambda: torch.randn_like(x_from))\n\n log_snr = self.log_snr(from_t)\n log_snr_padded_dim = right_pad_dims_to(x_from, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n log_snr_to = self.log_snr(to_t)\n log_snr_padded_dim_to = right_pad_dims_to(x_from, log_snr_to)\n alpha_to, sigma_to = log_snr_to_alpha_sigma(log_snr_padded_dim_to)\n\n return x_from * (alpha_to / alpha) + noise * (sigma_to * alpha - sigma * alpha_to) / alpha\n\n def predict_start_from_v(self, x_t, t, v):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return alpha * x_t - sigma * v\n\n def predict_start_from_noise(self, x_t, t, noise):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)\n\n# norms and residuals\n\nclass LayerNorm(nn.Module):\n def __init__(self, feats, stable = False, dim = -1):\n super().__init__()\n self.stable = stable\n self.dim = dim\n\n self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))\n\n def forward(self, x):\n dtype, dim = x.dtype, self.dim\n\n if self.stable:\n x = x / x.amax(dim = dim, keepdim = True).detach()\n\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n var = torch.var(x, dim = dim, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = dim, keepdim = True)\n\n return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)\n\nChanLayerNorm = partial(LayerNorm, dim = -3)\n\nclass Always():\n def __init__(self, val):\n self.val = val\n\n def __call__(self, *args, **kwargs):\n return self.val\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\nclass Parallel(nn.Module):\n def __init__(self, *fns):\n super().__init__()\n self.fns = nn.ModuleList(fns)\n\n def forward(self, x):\n outputs = [fn(x) for fn in self.fns]\n return sum(outputs)\n\n# attention pooling\n\nclass PerceiverAttention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_head = 64,\n heads = 8,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = nn.LayerNorm(dim)\n self.norm_latents = nn.LayerNorm(dim)\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n 
self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n nn.LayerNorm(dim)\n )\n\n def forward(self, x, latents, mask = None):\n x = self.norm(x)\n latents = self.norm_latents(latents)\n\n b, h = x.shape[0], self.heads\n\n q = self.to_q(latents)\n\n # the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to\n kv_input = torch.cat((x, latents), dim = -2)\n k, v = self.to_kv(kv_input).chunk(2, dim = -1)\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities and masking\n\n sim = einsum('... i d, ... j d -> ... i j', q, k) * self.scale\n\n if exists(mask):\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = F.pad(mask, (0, latents.shape[-2]), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('... i j, ... j d -> ... i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)', h = h)\n return self.to_out(out)\n\nclass PerceiverResampler(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n dim_head = 64,\n heads = 8,\n num_latents = 64,\n num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence\n max_seq_len = 512,\n ff_mult = 4\n ):\n super().__init__()\n self.pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.latents = nn.Parameter(torch.randn(num_latents, dim))\n\n self.to_latents_from_mean_pooled_seq = None\n\n if num_latents_mean_pooled > 0:\n self.to_latents_from_mean_pooled_seq = nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, dim * num_latents_mean_pooled),\n Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)\n )\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, mask = None):\n n, device = x.shape[1], x.device\n pos_emb = self.pos_emb(torch.arange(n, device = device))\n\n x_with_pos = x + pos_emb\n\n latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])\n\n if exists(self.to_latents_from_mean_pooled_seq):\n meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))\n meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)\n latents = torch.cat((meanpooled_latents, latents), dim = -2)\n\n for attn, ff in self.layers:\n latents = attn(x_with_pos, latents, mask = mask) + latents\n latents = ff(latents) + latents\n\n return latents\n\n# attention\n\nclass Attention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n dim_head = 64,\n heads = 8,\n context_dim = None,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = LayerNorm(dim)\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if 
exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context = None, mask = None, attn_bias = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n\n q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))\n\n q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b 1 d', b = b)\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # add text conditioning, if present\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n k = torch.cat((ck, k), dim = -2)\n v = torch.cat((cv, v), dim = -2)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # calculate query / key similarities\n\n sim = einsum('b h i d, b j d -> b h i j', q, k) * self.scale\n\n # relative positional encoding (T5 style)\n\n if exists(attn_bias):\n sim = sim + attn_bias\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n # aggregate values\n\n out = einsum('b h i j, b j d -> b h i d', attn, v)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\n# decoder\n\ndef Upsample(dim, dim_out = None):\n dim_out = default(dim_out, dim)\n\n return nn.Sequential(\n nn.Upsample(scale_factor = 2, mode = 'nearest'),\n nn.Conv2d(dim, dim_out, 3, padding = 1)\n )\n\nclass PixelShuffleUpsample(nn.Module):\n \"\"\"\n code shared by @MalumaDev at DALLE2-pytorch for addressing checkboard artifacts\n https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf\n \"\"\"\n def __init__(self, dim, dim_out = None):\n super().__init__()\n dim_out = default(dim_out, dim)\n conv = nn.Conv2d(dim, dim_out * 4, 1)\n\n self.net = nn.Sequential(\n conv,\n nn.SiLU(),\n nn.PixelShuffle(2)\n )\n\n self.init_conv_(conv)\n\n def init_conv_(self, conv):\n o, i, h, w = conv.weight.shape\n conv_weight = torch.empty(o // 4, i, h, w)\n nn.init.kaiming_uniform_(conv_weight)\n conv_weight = repeat(conv_weight, 'o ... 
-> (o 4) ...')\n\n conv.weight.data.copy_(conv_weight)\n nn.init.zeros_(conv.bias.data)\n\n def forward(self, x):\n return self.net(x)\n\ndef Downsample(dim, dim_out = None):\n # https://arxiv.org/abs/2208.03641 shows this is the most optimal way to downsample\n # named SP-conv in the paper, but basically a pixel unshuffle\n dim_out = default(dim_out, dim)\n return nn.Sequential(\n Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),\n nn.Conv2d(dim * 4, dim_out, 1)\n )\n\nclass SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)\n emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')\n return torch.cat((emb.sin(), emb.cos()), dim = -1)\n\nclass LearnedSinusoidalPosEmb(nn.Module):\n \"\"\" following @crowsonkb 's lead with learned sinusoidal pos emb \"\"\"\n \"\"\" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 \"\"\"\n\n def __init__(self, dim):", "metadata": {"task_id": "lucidrains--imagen-pytorch/25", "ground_truth": " super().__init__()\n assert (dim % 2) == 0\n half_dim = dim // 2\n self.weights = nn.Parameter(torch.randn(half_dim))\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 293, "lineno": 652, "function_name": "__init__"}, "groundtruth": " super().__init__()\n assert (dim % 2) == 0\n half_dim = dim // 2\n self.weights = nn.Parameter(torch.randn(half_dim))\n"} +{"prompt": "(noise, lambda: torch.randn_like(x_from))\n\n log_snr = self.log_snr(from_t)\n log_snr_padded_dim = right_pad_dims_to(x_from, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr_padded_dim)\n\n log_snr_to = self.log_snr(to_t)\n log_snr_padded_dim_to = right_pad_dims_to(x_from, log_snr_to)\n alpha_to, sigma_to = log_snr_to_alpha_sigma(log_snr_padded_dim_to)\n\n return x_from * (alpha_to / alpha) + noise * (sigma_to * alpha - sigma * alpha_to) / alpha\n\n def predict_start_from_v(self, x_t, t, v):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return alpha * x_t - sigma * v\n\n def predict_start_from_noise(self, x_t, t, noise):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)\n\n# norms and residuals\n\nclass LayerNorm(nn.Module):\n def __init__(self, feats, stable = False, dim = -1):\n super().__init__()\n self.stable = stable\n self.dim = dim\n\n self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))\n\n def forward(self, x):\n dtype, dim = x.dtype, self.dim\n\n if self.stable:\n x = x / x.amax(dim = dim, keepdim = True).detach()\n\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n var = torch.var(x, dim = dim, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = dim, keepdim = True)\n\n return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)\n\nChanLayerNorm = partial(LayerNorm, dim = -3)\n\nclass Always():\n def __init__(self, val):\n self.val = val\n\n def __call__(self, *args, **kwargs):\n return self.val\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\nclass Parallel(nn.Module):\n def __init__(self, *fns):\n 
super().__init__()\n self.fns = nn.ModuleList(fns)\n\n def forward(self, x):\n outputs = [fn(x) for fn in self.fns]\n return sum(outputs)\n\n# attention pooling\n\nclass PerceiverAttention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_head = 64,\n heads = 8,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = nn.LayerNorm(dim)\n self.norm_latents = nn.LayerNorm(dim)\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n nn.LayerNorm(dim)\n )\n\n def forward(self, x, latents, mask = None):\n x = self.norm(x)\n latents = self.norm_latents(latents)\n\n b, h = x.shape[0], self.heads\n\n q = self.to_q(latents)\n\n # the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to\n kv_input = torch.cat((x, latents), dim = -2)\n k, v = self.to_kv(kv_input).chunk(2, dim = -1)\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities and masking\n\n sim = einsum('... i d, ... j d -> ... i j', q, k) * self.scale\n\n if exists(mask):\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = F.pad(mask, (0, latents.shape[-2]), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('... i j, ... j d -> ... 
i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)', h = h)\n return self.to_out(out)\n\nclass PerceiverResampler(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n dim_head = 64,\n heads = 8,\n num_latents = 64,\n num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence\n max_seq_len = 512,\n ff_mult = 4\n ):\n super().__init__()\n self.pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.latents = nn.Parameter(torch.randn(num_latents, dim))\n\n self.to_latents_from_mean_pooled_seq = None\n\n if num_latents_mean_pooled > 0:\n self.to_latents_from_mean_pooled_seq = nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, dim * num_latents_mean_pooled),\n Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)\n )\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, mask = None):\n n, device = x.shape[1], x.device\n pos_emb = self.pos_emb(torch.arange(n, device = device))\n\n x_with_pos = x + pos_emb\n\n latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])\n\n if exists(self.to_latents_from_mean_pooled_seq):\n meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))\n meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)\n latents = torch.cat((meanpooled_latents, latents), dim = -2)\n\n for attn, ff in self.layers:\n latents = attn(x_with_pos, latents, mask = mask) + latents\n latents = ff(latents) + latents\n\n return latents\n\n# attention\n\nclass Attention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n dim_head = 64,\n heads = 8,\n context_dim = None,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = LayerNorm(dim)\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context = None, mask = None, attn_bias = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n\n q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))\n\n q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b 1 d', b = b)\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # add text conditioning, if present\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n k = torch.cat((ck, k), dim = -2)\n v = torch.cat((cv, v), dim = -2)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # calculate query / key similarities\n\n sim = einsum('b h i d, b j d -> b h i j', q, k) * self.scale\n\n # relative positional encoding (T5 style)\n\n if exists(attn_bias):\n sim = sim + attn_bias\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = 
F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n # aggregate values\n\n out = einsum('b h i j, b j d -> b h i d', attn, v)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\n# decoder\n\ndef Upsample(dim, dim_out = None):\n dim_out = default(dim_out, dim)\n\n return nn.Sequential(\n nn.Upsample(scale_factor = 2, mode = 'nearest'),\n nn.Conv2d(dim, dim_out, 3, padding = 1)\n )\n\nclass PixelShuffleUpsample(nn.Module):\n \"\"\"\n code shared by @MalumaDev at DALLE2-pytorch for addressing checkboard artifacts\n https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf\n \"\"\"\n def __init__(self, dim, dim_out = None):\n super().__init__()\n dim_out = default(dim_out, dim)\n conv = nn.Conv2d(dim, dim_out * 4, 1)\n\n self.net = nn.Sequential(\n conv,\n nn.SiLU(),\n nn.PixelShuffle(2)\n )\n\n self.init_conv_(conv)\n\n def init_conv_(self, conv):\n o, i, h, w = conv.weight.shape\n conv_weight = torch.empty(o // 4, i, h, w)\n nn.init.kaiming_uniform_(conv_weight)\n conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')\n\n conv.weight.data.copy_(conv_weight)\n nn.init.zeros_(conv.bias.data)\n\n def forward(self, x):\n return self.net(x)\n\ndef Downsample(dim, dim_out = None):\n # https://arxiv.org/abs/2208.03641 shows this is the most optimal way to downsample\n # named SP-conv in the paper, but basically a pixel unshuffle\n dim_out = default(dim_out, dim)\n return nn.Sequential(\n Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),\n nn.Conv2d(dim * 4, dim_out, 1)\n )\n\nclass SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)\n emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')\n return torch.cat((emb.sin(), emb.cos()), dim = -1)\n\nclass LearnedSinusoidalPosEmb(nn.Module):\n \"\"\" following @crowsonkb 's lead with learned sinusoidal pos emb \"\"\"\n \"\"\" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n assert (dim % 2) == 0\n half_dim = dim // 2\n self.weights = nn.Parameter(torch.randn(half_dim))\n\n def forward(self, x):", "metadata": {"task_id": "lucidrains--imagen-pytorch/26", "ground_truth": " x = rearrange(x, 'b -> b 1')\n freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi\n fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)\n fouriered = torch.cat((x, fouriered), dim = -1)\n return fouriered\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 298, "lineno": 658, "function_name": "forward"}, "groundtruth": " x = rearrange(x, 'b -> b 1')\n freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi\n fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)\n fouriered = torch.cat((x, fouriered), dim = -1)\n return fouriered\n"} +{"prompt": "x_from, log_snr_to)\n alpha_to, sigma_to = log_snr_to_alpha_sigma(log_snr_padded_dim_to)\n\n return x_from * (alpha_to / alpha) + noise * (sigma_to * alpha - sigma * alpha_to) / alpha\n\n def predict_start_from_v(self, x_t, t, v):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = 
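The record just completed fills in LearnedSinusoidalPosEmb.forward. A minimal self-contained sketch of the same idea follows: a learned frequency vector replaces the fixed geometric frequencies of the sinusoidal embedding, and the raw input is concatenated back so the network also sees the un-embedded value; the output width is therefore dim + 1. This mirrors the class above but is an illustration, not the dataset's code.

import math
import torch
from torch import nn
from einops import rearrange

class LearnedSinusoidalPosEmb(nn.Module):
    def __init__(self, dim):
        super().__init__()
        assert dim % 2 == 0
        # learned frequencies, one per half-dimension
        self.weights = nn.Parameter(torch.randn(dim // 2))

    def forward(self, x):
        x = rearrange(x, 'b -> b 1')
        freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi
        # concatenate the raw value with its sin/cos features
        return torch.cat((x, freqs.sin(), freqs.cos()), dim=-1)

emb = LearnedSinusoidalPosEmb(16)
t = torch.rand(8)                # e.g. diffusion times in [0, 1]
assert emb(t).shape == (8, 17)   # dim + 1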
log_snr_to_alpha_sigma(log_snr)\n return alpha * x_t - sigma * v\n\n def predict_start_from_noise(self, x_t, t, noise):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)\n\n# norms and residuals\n\nclass LayerNorm(nn.Module):\n def __init__(self, feats, stable = False, dim = -1):\n super().__init__()\n self.stable = stable\n self.dim = dim\n\n self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))\n\n def forward(self, x):\n dtype, dim = x.dtype, self.dim\n\n if self.stable:\n x = x / x.amax(dim = dim, keepdim = True).detach()\n\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n var = torch.var(x, dim = dim, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = dim, keepdim = True)\n\n return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)\n\nChanLayerNorm = partial(LayerNorm, dim = -3)\n\nclass Always():\n def __init__(self, val):\n self.val = val\n\n def __call__(self, *args, **kwargs):\n return self.val\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\nclass Parallel(nn.Module):\n def __init__(self, *fns):\n super().__init__()\n self.fns = nn.ModuleList(fns)\n\n def forward(self, x):\n outputs = [fn(x) for fn in self.fns]\n return sum(outputs)\n\n# attention pooling\n\nclass PerceiverAttention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_head = 64,\n heads = 8,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = nn.LayerNorm(dim)\n self.norm_latents = nn.LayerNorm(dim)\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n nn.LayerNorm(dim)\n )\n\n def forward(self, x, latents, mask = None):\n x = self.norm(x)\n latents = self.norm_latents(latents)\n\n b, h = x.shape[0], self.heads\n\n q = self.to_q(latents)\n\n # the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to\n kv_input = torch.cat((x, latents), dim = -2)\n k, v = self.to_kv(kv_input).chunk(2, dim = -1)\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities and masking\n\n sim = einsum('... i d, ... j d -> ... i j', q, k) * self.scale\n\n if exists(mask):\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = F.pad(mask, (0, latents.shape[-2]), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('... i j, ... j d -> ... 
i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)', h = h)\n return self.to_out(out)\n\nclass PerceiverResampler(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n dim_head = 64,\n heads = 8,\n num_latents = 64,\n num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence\n max_seq_len = 512,\n ff_mult = 4\n ):\n super().__init__()\n self.pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.latents = nn.Parameter(torch.randn(num_latents, dim))\n\n self.to_latents_from_mean_pooled_seq = None\n\n if num_latents_mean_pooled > 0:\n self.to_latents_from_mean_pooled_seq = nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, dim * num_latents_mean_pooled),\n Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)\n )\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, mask = None):\n n, device = x.shape[1], x.device\n pos_emb = self.pos_emb(torch.arange(n, device = device))\n\n x_with_pos = x + pos_emb\n\n latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])\n\n if exists(self.to_latents_from_mean_pooled_seq):\n meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))\n meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)\n latents = torch.cat((meanpooled_latents, latents), dim = -2)\n\n for attn, ff in self.layers:\n latents = attn(x_with_pos, latents, mask = mask) + latents\n latents = ff(latents) + latents\n\n return latents\n\n# attention\n\nclass Attention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n dim_head = 64,\n heads = 8,\n context_dim = None,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = LayerNorm(dim)\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context = None, mask = None, attn_bias = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n\n q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))\n\n q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b 1 d', b = b)\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # add text conditioning, if present\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n k = torch.cat((ck, k), dim = -2)\n v = torch.cat((cv, v), dim = -2)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # calculate query / key similarities\n\n sim = einsum('b h i d, b j d -> b h i j', q, k) * self.scale\n\n # relative positional encoding (T5 style)\n\n if exists(attn_bias):\n sim = sim + attn_bias\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = 
F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n # aggregate values\n\n out = einsum('b h i j, b j d -> b h i d', attn, v)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\n# decoder\n\ndef Upsample(dim, dim_out = None):\n dim_out = default(dim_out, dim)\n\n return nn.Sequential(\n nn.Upsample(scale_factor = 2, mode = 'nearest'),\n nn.Conv2d(dim, dim_out, 3, padding = 1)\n )\n\nclass PixelShuffleUpsample(nn.Module):\n \"\"\"\n code shared by @MalumaDev at DALLE2-pytorch for addressing checkboard artifacts\n https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf\n \"\"\"\n def __init__(self, dim, dim_out = None):\n super().__init__()\n dim_out = default(dim_out, dim)\n conv = nn.Conv2d(dim, dim_out * 4, 1)\n\n self.net = nn.Sequential(\n conv,\n nn.SiLU(),\n nn.PixelShuffle(2)\n )\n\n self.init_conv_(conv)\n\n def init_conv_(self, conv):\n o, i, h, w = conv.weight.shape\n conv_weight = torch.empty(o // 4, i, h, w)\n nn.init.kaiming_uniform_(conv_weight)\n conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')\n\n conv.weight.data.copy_(conv_weight)\n nn.init.zeros_(conv.bias.data)\n\n def forward(self, x):\n return self.net(x)\n\ndef Downsample(dim, dim_out = None):\n # https://arxiv.org/abs/2208.03641 shows this is the most optimal way to downsample\n # named SP-conv in the paper, but basically a pixel unshuffle\n dim_out = default(dim_out, dim)\n return nn.Sequential(\n Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),\n nn.Conv2d(dim * 4, dim_out, 1)\n )\n\nclass SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)\n emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')\n return torch.cat((emb.sin(), emb.cos()), dim = -1)\n\nclass LearnedSinusoidalPosEmb(nn.Module):\n \"\"\" following @crowsonkb 's lead with learned sinusoidal pos emb \"\"\"\n \"\"\" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n assert (dim % 2) == 0\n half_dim = dim // 2\n self.weights = nn.Parameter(torch.randn(half_dim))\n\n def forward(self, x):\n x = rearrange(x, 'b -> b 1')\n freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi\n fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)\n fouriered = torch.cat((x, fouriered), dim = -1)\n return fouriered\n\nclass Block(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n groups = 8,\n norm = True\n ):", "metadata": {"task_id": "lucidrains--imagen-pytorch/27", "ground_truth": " super().__init__()\n self.groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()\n self.activation = nn.SiLU()\n self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 305, "lineno": 672, "function_name": "__init__"}, "groundtruth": " super().__init__()\n self.groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()\n self.activation = nn.SiLU()\n self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)\n"} +{"prompt": " def predict_start_from_v(self, x_t, t, v):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, 
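The ground truth just above constructs Block as GroupNorm -> SiLU -> 3x3 conv, and its forward (completed in a later record) injects a (scale, shift) pair between the norm and the activation. A hedged standalone sketch of that FiLM-style pattern, with illustrative names, is below; the (scale + 1) convention keeps the modulation at identity when the conditioning pathway is zero-initialized.

import torch
from torch import nn

class FiLMBlock(nn.Module):
    def __init__(self, dim, dim_out, groups=8):
        super().__init__()
        self.norm = nn.GroupNorm(groups, dim)
        self.act = nn.SiLU()
        self.proj = nn.Conv2d(dim, dim_out, 3, padding=1)

    def forward(self, x, scale_shift=None):
        x = self.norm(x)
        if scale_shift is not None:
            scale, shift = scale_shift
            # scale of zero leaves x unchanged, hence the +1
            x = x * (scale + 1) + shift
        return self.proj(self.act(x))

blk = FiLMBlock(32, 64)
x = torch.randn(2, 32, 16, 16)
scale = torch.zeros(2, 32, 1, 1)   # zeros -> identity modulation
shift = torch.zeros(2, 32, 1, 1)
assert blk(x, (scale, shift)).shape == (2, 64, 16, 16)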
log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return alpha * x_t - sigma * v\n\n def predict_start_from_noise(self, x_t, t, noise):\n log_snr = self.log_snr(t)\n log_snr = right_pad_dims_to(x_t, log_snr)\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)\n\n# norms and residuals\n\nclass LayerNorm(nn.Module):\n def __init__(self, feats, stable = False, dim = -1):\n super().__init__()\n self.stable = stable\n self.dim = dim\n\n self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))\n\n def forward(self, x):\n dtype, dim = x.dtype, self.dim\n\n if self.stable:\n x = x / x.amax(dim = dim, keepdim = True).detach()\n\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n var = torch.var(x, dim = dim, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = dim, keepdim = True)\n\n return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)\n\nChanLayerNorm = partial(LayerNorm, dim = -3)\n\nclass Always():\n def __init__(self, val):\n self.val = val\n\n def __call__(self, *args, **kwargs):\n return self.val\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\nclass Parallel(nn.Module):\n def __init__(self, *fns):\n super().__init__()\n self.fns = nn.ModuleList(fns)\n\n def forward(self, x):\n outputs = [fn(x) for fn in self.fns]\n return sum(outputs)\n\n# attention pooling\n\nclass PerceiverAttention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_head = 64,\n heads = 8,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = nn.LayerNorm(dim)\n self.norm_latents = nn.LayerNorm(dim)\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n nn.LayerNorm(dim)\n )\n\n def forward(self, x, latents, mask = None):\n x = self.norm(x)\n latents = self.norm_latents(latents)\n\n b, h = x.shape[0], self.heads\n\n q = self.to_q(latents)\n\n # the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to\n kv_input = torch.cat((x, latents), dim = -2)\n k, v = self.to_kv(kv_input).chunk(2, dim = -1)\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities and masking\n\n sim = einsum('... i d, ... j d -> ... i j', q, k) * self.scale\n\n if exists(mask):\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = F.pad(mask, (0, latents.shape[-2]), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('... i j, ... j d -> ... 
i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)', h = h)\n return self.to_out(out)\n\nclass PerceiverResampler(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n dim_head = 64,\n heads = 8,\n num_latents = 64,\n num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence\n max_seq_len = 512,\n ff_mult = 4\n ):\n super().__init__()\n self.pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.latents = nn.Parameter(torch.randn(num_latents, dim))\n\n self.to_latents_from_mean_pooled_seq = None\n\n if num_latents_mean_pooled > 0:\n self.to_latents_from_mean_pooled_seq = nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, dim * num_latents_mean_pooled),\n Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)\n )\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, mask = None):\n n, device = x.shape[1], x.device\n pos_emb = self.pos_emb(torch.arange(n, device = device))\n\n x_with_pos = x + pos_emb\n\n latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])\n\n if exists(self.to_latents_from_mean_pooled_seq):\n meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))\n meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)\n latents = torch.cat((meanpooled_latents, latents), dim = -2)\n\n for attn, ff in self.layers:\n latents = attn(x_with_pos, latents, mask = mask) + latents\n latents = ff(latents) + latents\n\n return latents\n\n# attention\n\nclass Attention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n dim_head = 64,\n heads = 8,\n context_dim = None,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = LayerNorm(dim)\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context = None, mask = None, attn_bias = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n\n q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))\n\n q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b 1 d', b = b)\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # add text conditioning, if present\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n k = torch.cat((ck, k), dim = -2)\n v = torch.cat((cv, v), dim = -2)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # calculate query / key similarities\n\n sim = einsum('b h i d, b j d -> b h i j', q, k) * self.scale\n\n # relative positional encoding (T5 style)\n\n if exists(attn_bias):\n sim = sim + attn_bias\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = 
F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n # aggregate values\n\n out = einsum('b h i j, b j d -> b h i d', attn, v)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\n# decoder\n\ndef Upsample(dim, dim_out = None):\n dim_out = default(dim_out, dim)\n\n return nn.Sequential(\n nn.Upsample(scale_factor = 2, mode = 'nearest'),\n nn.Conv2d(dim, dim_out, 3, padding = 1)\n )\n\nclass PixelShuffleUpsample(nn.Module):\n \"\"\"\n code shared by @MalumaDev at DALLE2-pytorch for addressing checkboard artifacts\n https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf\n \"\"\"\n def __init__(self, dim, dim_out = None):\n super().__init__()\n dim_out = default(dim_out, dim)\n conv = nn.Conv2d(dim, dim_out * 4, 1)\n\n self.net = nn.Sequential(\n conv,\n nn.SiLU(),\n nn.PixelShuffle(2)\n )\n\n self.init_conv_(conv)\n\n def init_conv_(self, conv):\n o, i, h, w = conv.weight.shape\n conv_weight = torch.empty(o // 4, i, h, w)\n nn.init.kaiming_uniform_(conv_weight)\n conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')\n\n conv.weight.data.copy_(conv_weight)\n nn.init.zeros_(conv.bias.data)\n\n def forward(self, x):\n return self.net(x)\n\ndef Downsample(dim, dim_out = None):\n # https://arxiv.org/abs/2208.03641 shows this is the most optimal way to downsample\n # named SP-conv in the paper, but basically a pixel unshuffle\n dim_out = default(dim_out, dim)\n return nn.Sequential(\n Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),\n nn.Conv2d(dim * 4, dim_out, 1)\n )\n\nclass SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)\n emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')\n return torch.cat((emb.sin(), emb.cos()), dim = -1)\n\nclass LearnedSinusoidalPosEmb(nn.Module):\n \"\"\" following @crowsonkb 's lead with learned sinusoidal pos emb \"\"\"\n \"\"\" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n assert (dim % 2) == 0\n half_dim = dim // 2\n self.weights = nn.Parameter(torch.randn(half_dim))\n\n def forward(self, x):\n x = rearrange(x, 'b -> b 1')\n freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi\n fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)\n fouriered = torch.cat((x, fouriered), dim = -1)\n return fouriered\n\nclass Block(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n groups = 8,\n norm = True\n ):\n super().__init__()\n self.groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()\n self.activation = nn.SiLU()\n self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)\n\n def forward(self, x, scale_shift = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/28", "ground_truth": " x = self.groupnorm(x)\n\n if exists(scale_shift):\n scale, shift = scale_shift\n x = x * (scale + 1) + shift\n\n x = self.activation(x)\n return self.project(x)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 310, "lineno": 678, "function_name": "forward"}, "groundtruth": " x = self.groupnorm(x)\n\n if exists(scale_shift):\n scale, shift = scale_shift\n x = x * (scale 
+ 1) + shift\n\n x = self.activation(x)\n return self.project(x)\n"} +{"prompt": ")\n alpha, sigma = log_snr_to_alpha_sigma(log_snr)\n return (x_t - sigma * noise) / alpha.clamp(min = 1e-8)\n\n# norms and residuals\n\nclass LayerNorm(nn.Module):\n def __init__(self, feats, stable = False, dim = -1):\n super().__init__()\n self.stable = stable\n self.dim = dim\n\n self.g = nn.Parameter(torch.ones(feats, *((1,) * (-dim - 1))))\n\n def forward(self, x):\n dtype, dim = x.dtype, self.dim\n\n if self.stable:\n x = x / x.amax(dim = dim, keepdim = True).detach()\n\n eps = 1e-5 if x.dtype == torch.float32 else 1e-3\n var = torch.var(x, dim = dim, unbiased = False, keepdim = True)\n mean = torch.mean(x, dim = dim, keepdim = True)\n\n return (x - mean) * (var + eps).rsqrt().type(dtype) * self.g.type(dtype)\n\nChanLayerNorm = partial(LayerNorm, dim = -3)\n\nclass Always():\n def __init__(self, val):\n self.val = val\n\n def __call__(self, *args, **kwargs):\n return self.val\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\nclass Parallel(nn.Module):\n def __init__(self, *fns):\n super().__init__()\n self.fns = nn.ModuleList(fns)\n\n def forward(self, x):\n outputs = [fn(x) for fn in self.fns]\n return sum(outputs)\n\n# attention pooling\n\nclass PerceiverAttention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_head = 64,\n heads = 8,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = nn.LayerNorm(dim)\n self.norm_latents = nn.LayerNorm(dim)\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n nn.LayerNorm(dim)\n )\n\n def forward(self, x, latents, mask = None):\n x = self.norm(x)\n latents = self.norm_latents(latents)\n\n b, h = x.shape[0], self.heads\n\n q = self.to_q(latents)\n\n # the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to\n kv_input = torch.cat((x, latents), dim = -2)\n k, v = self.to_kv(kv_input).chunk(2, dim = -1)\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities and masking\n\n sim = einsum('... i d, ... j d -> ... i j', q, k) * self.scale\n\n if exists(mask):\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = F.pad(mask, (0, latents.shape[-2]), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('... i j, ... j d -> ... 
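Several prompts in this file repeat the "null key / value" trick from the Attention class: one learned key/value pair is prepended for every batch element, so even a fully masked (or text-free) sequence has something to attend to, which is what makes classifier-free guidance dropout well-defined. The shape-level sketch below is an illustration under that reading, not the dataset's code; the single-headed k/v follows the Attention class above, where to_kv produces dim_head * 2 channels shared across heads.

import torch
import torch.nn.functional as F
from torch import nn
from einops import repeat

b, heads, n, d = 2, 8, 10, 64
k = torch.randn(b, n, d)   # single-headed keys/values, as in Attention above
v = torch.randn(b, n, d)

null_kv = nn.Parameter(torch.randn(2, d))
nk, nv = repeat(null_kv, 'two d -> two b 1 d', b=b).unbind(dim=0)
k = torch.cat((nk, k), dim=-2)   # sequence length becomes n + 1
v = torch.cat((nv, v), dim=-2)

mask = torch.zeros(b, n, dtype=torch.bool)   # everything masked out
mask = F.pad(mask, (1, 0), value=True)       # the null slot stays visible
assert k.shape == (b, n + 1, d) and mask[:, 0].all()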
i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)', h = h)\n return self.to_out(out)\n\nclass PerceiverResampler(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n dim_head = 64,\n heads = 8,\n num_latents = 64,\n num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence\n max_seq_len = 512,\n ff_mult = 4\n ):\n super().__init__()\n self.pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.latents = nn.Parameter(torch.randn(num_latents, dim))\n\n self.to_latents_from_mean_pooled_seq = None\n\n if num_latents_mean_pooled > 0:\n self.to_latents_from_mean_pooled_seq = nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, dim * num_latents_mean_pooled),\n Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)\n )\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, mask = None):\n n, device = x.shape[1], x.device\n pos_emb = self.pos_emb(torch.arange(n, device = device))\n\n x_with_pos = x + pos_emb\n\n latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])\n\n if exists(self.to_latents_from_mean_pooled_seq):\n meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))\n meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)\n latents = torch.cat((meanpooled_latents, latents), dim = -2)\n\n for attn, ff in self.layers:\n latents = attn(x_with_pos, latents, mask = mask) + latents\n latents = ff(latents) + latents\n\n return latents\n\n# attention\n\nclass Attention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n dim_head = 64,\n heads = 8,\n context_dim = None,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = LayerNorm(dim)\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context = None, mask = None, attn_bias = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n\n q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))\n\n q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b 1 d', b = b)\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # add text conditioning, if present\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n k = torch.cat((ck, k), dim = -2)\n v = torch.cat((cv, v), dim = -2)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # calculate query / key similarities\n\n sim = einsum('b h i d, b j d -> b h i j', q, k) * self.scale\n\n # relative positional encoding (T5 style)\n\n if exists(attn_bias):\n sim = sim + attn_bias\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = 
F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n # aggregate values\n\n out = einsum('b h i j, b j d -> b h i d', attn, v)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\n# decoder\n\ndef Upsample(dim, dim_out = None):\n dim_out = default(dim_out, dim)\n\n return nn.Sequential(\n nn.Upsample(scale_factor = 2, mode = 'nearest'),\n nn.Conv2d(dim, dim_out, 3, padding = 1)\n )\n\nclass PixelShuffleUpsample(nn.Module):\n \"\"\"\n code shared by @MalumaDev at DALLE2-pytorch for addressing checkboard artifacts\n https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf\n \"\"\"\n def __init__(self, dim, dim_out = None):\n super().__init__()\n dim_out = default(dim_out, dim)\n conv = nn.Conv2d(dim, dim_out * 4, 1)\n\n self.net = nn.Sequential(\n conv,\n nn.SiLU(),\n nn.PixelShuffle(2)\n )\n\n self.init_conv_(conv)\n\n def init_conv_(self, conv):\n o, i, h, w = conv.weight.shape\n conv_weight = torch.empty(o // 4, i, h, w)\n nn.init.kaiming_uniform_(conv_weight)\n conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')\n\n conv.weight.data.copy_(conv_weight)\n nn.init.zeros_(conv.bias.data)\n\n def forward(self, x):\n return self.net(x)\n\ndef Downsample(dim, dim_out = None):\n # https://arxiv.org/abs/2208.03641 shows this is the most optimal way to downsample\n # named SP-conv in the paper, but basically a pixel unshuffle\n dim_out = default(dim_out, dim)\n return nn.Sequential(\n Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),\n nn.Conv2d(dim * 4, dim_out, 1)\n )\n\nclass SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)\n emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')\n return torch.cat((emb.sin(), emb.cos()), dim = -1)\n\nclass LearnedSinusoidalPosEmb(nn.Module):\n \"\"\" following @crowsonkb 's lead with learned sinusoidal pos emb \"\"\"\n \"\"\" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n assert (dim % 2) == 0\n half_dim = dim // 2\n self.weights = nn.Parameter(torch.randn(half_dim))\n\n def forward(self, x):\n x = rearrange(x, 'b -> b 1')\n freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi\n fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)\n fouriered = torch.cat((x, fouriered), dim = -1)\n return fouriered\n\nclass Block(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n groups = 8,\n norm = True\n ):\n super().__init__()\n self.groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()\n self.activation = nn.SiLU()\n self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)\n\n def forward(self, x, scale_shift = None):\n x = self.groupnorm(x)\n\n if exists(scale_shift):\n scale, shift = scale_shift\n x = x * (scale + 1) + shift\n\n x = self.activation(x)\n return self.project(x)\n\nclass ResnetBlock(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n *,\n cond_dim = None,\n time_cond_dim = None,\n groups = 8,\n linear_attn = False,\n use_gca = False,\n squeeze_excite = False,\n **attn_kwargs\n ):", "metadata": {"task_id": "lucidrains--imagen-pytorch/29", "ground_truth": " super().__init__()\n\n self.time_mlp = 
None\n\n if exists(time_cond_dim):\n self.time_mlp = nn.Sequential(\n nn.SiLU(),\n nn.Linear(time_cond_dim, dim_out * 2)\n )\n\n self.cross_attn = None\n\n if exists(cond_dim):\n attn_klass = CrossAttention if not linear_attn else LinearCrossAttention\n\n self.cross_attn = attn_klass(\n dim = dim_out,\n context_dim = cond_dim,\n **attn_kwargs\n )\n\n self.block1 = Block(dim, dim_out, groups = groups)\n self.block2 = Block(dim_out, dim_out, groups = groups)\n\n self.gca = GlobalContext(dim_in = dim_out, dim_out = dim_out) if use_gca else Always(1)\n\n self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else Identity()\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 318, "lineno": 701, "function_name": "__init__"}, "groundtruth": " super().__init__()\n\n self.time_mlp = None\n\n if exists(time_cond_dim):\n self.time_mlp = nn.Sequential(\n nn.SiLU(),\n nn.Linear(time_cond_dim, dim_out * 2)\n )\n\n self.cross_attn = None\n\n if exists(cond_dim):\n attn_klass = CrossAttention if not linear_attn else LinearCrossAttention\n\n self.cross_attn = attn_klass(\n dim = dim_out,\n context_dim = cond_dim,\n **attn_kwargs\n )\n\n self.block1 = Block(dim, dim_out, groups = groups)\n self.block2 = Block(dim_out, dim_out, groups = groups)\n\n self.gca = GlobalContext(dim_in = dim_out, dim_out = dim_out) if use_gca else Always(1)\n\n self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else Identity()\n"} +{"prompt": "dtype)\n\nChanLayerNorm = partial(LayerNorm, dim = -3)\n\nclass Always():\n def __init__(self, val):\n self.val = val\n\n def __call__(self, *args, **kwargs):\n return self.val\n\nclass Residual(nn.Module):\n def __init__(self, fn):\n super().__init__()\n self.fn = fn\n\n def forward(self, x, **kwargs):\n return self.fn(x, **kwargs) + x\n\nclass Parallel(nn.Module):\n def __init__(self, *fns):\n super().__init__()\n self.fns = nn.ModuleList(fns)\n\n def forward(self, x):\n outputs = [fn(x) for fn in self.fns]\n return sum(outputs)\n\n# attention pooling\n\nclass PerceiverAttention(nn.Module):\n def __init__(\n self,\n *,\n dim,\n dim_head = 64,\n heads = 8,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = nn.LayerNorm(dim)\n self.norm_latents = nn.LayerNorm(dim)\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n nn.LayerNorm(dim)\n )\n\n def forward(self, x, latents, mask = None):\n x = self.norm(x)\n latents = self.norm_latents(latents)\n\n b, h = x.shape[0], self.heads\n\n q = self.to_q(latents)\n\n # the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to\n kv_input = torch.cat((x, latents), dim = -2)\n k, v = self.to_kv(kv_input).chunk(2, dim = -1)\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities and masking\n\n sim = einsum('... i d, ... j d -> ... 
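The ResnetBlock ground truth above builds time_mlp as SiLU -> Linear(time_cond_dim, dim_out * 2); its forward (next record) broadcasts the result over space and chunks it into the (scale, shift) pair for the inner block. A small sketch of just that conditioning path, with illustrative sizes:

import torch
from torch import nn
from einops import rearrange

dim_out, time_cond_dim = 64, 128
time_mlp = nn.Sequential(nn.SiLU(), nn.Linear(time_cond_dim, dim_out * 2))

time_emb = torch.randn(4, time_cond_dim)
t = time_mlp(time_emb)
t = rearrange(t, 'b c -> b c 1 1')   # broadcast over (h, w)
scale, shift = t.chunk(2, dim=1)     # each (4, dim_out, 1, 1)
assert scale.shape == shift.shape == (4, dim_out, 1, 1)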
i j', q, k) * self.scale\n\n if exists(mask):\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = F.pad(mask, (0, latents.shape[-2]), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('... i j, ... j d -> ... i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)', h = h)\n return self.to_out(out)\n\nclass PerceiverResampler(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n dim_head = 64,\n heads = 8,\n num_latents = 64,\n num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence\n max_seq_len = 512,\n ff_mult = 4\n ):\n super().__init__()\n self.pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.latents = nn.Parameter(torch.randn(num_latents, dim))\n\n self.to_latents_from_mean_pooled_seq = None\n\n if num_latents_mean_pooled > 0:\n self.to_latents_from_mean_pooled_seq = nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, dim * num_latents_mean_pooled),\n Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)\n )\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, mask = None):\n n, device = x.shape[1], x.device\n pos_emb = self.pos_emb(torch.arange(n, device = device))\n\n x_with_pos = x + pos_emb\n\n latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])\n\n if exists(self.to_latents_from_mean_pooled_seq):\n meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))\n meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)\n latents = torch.cat((meanpooled_latents, latents), dim = -2)\n\n for attn, ff in self.layers:\n latents = attn(x_with_pos, latents, mask = mask) + latents\n latents = ff(latents) + latents\n\n return latents\n\n# attention\n\nclass Attention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n dim_head = 64,\n heads = 8,\n context_dim = None,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = LayerNorm(dim)\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context = None, mask = None, attn_bias = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n\n q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))\n\n q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b 1 d', b = b)\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # add text conditioning, if present\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n k = torch.cat((ck, k), dim = -2)\n v = torch.cat((cv, v), dim = -2)\n\n # qk 
rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # calculate query / key similarities\n\n sim = einsum('b h i d, b j d -> b h i j', q, k) * self.scale\n\n # relative positional encoding (T5 style)\n\n if exists(attn_bias):\n sim = sim + attn_bias\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n # aggregate values\n\n out = einsum('b h i j, b j d -> b h i d', attn, v)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\n# decoder\n\ndef Upsample(dim, dim_out = None):\n dim_out = default(dim_out, dim)\n\n return nn.Sequential(\n nn.Upsample(scale_factor = 2, mode = 'nearest'),\n nn.Conv2d(dim, dim_out, 3, padding = 1)\n )\n\nclass PixelShuffleUpsample(nn.Module):\n \"\"\"\n code shared by @MalumaDev at DALLE2-pytorch for addressing checkboard artifacts\n https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf\n \"\"\"\n def __init__(self, dim, dim_out = None):\n super().__init__()\n dim_out = default(dim_out, dim)\n conv = nn.Conv2d(dim, dim_out * 4, 1)\n\n self.net = nn.Sequential(\n conv,\n nn.SiLU(),\n nn.PixelShuffle(2)\n )\n\n self.init_conv_(conv)\n\n def init_conv_(self, conv):\n o, i, h, w = conv.weight.shape\n conv_weight = torch.empty(o // 4, i, h, w)\n nn.init.kaiming_uniform_(conv_weight)\n conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')\n\n conv.weight.data.copy_(conv_weight)\n nn.init.zeros_(conv.bias.data)\n\n def forward(self, x):\n return self.net(x)\n\ndef Downsample(dim, dim_out = None):\n # https://arxiv.org/abs/2208.03641 shows this is the most optimal way to downsample\n # named SP-conv in the paper, but basically a pixel unshuffle\n dim_out = default(dim_out, dim)\n return nn.Sequential(\n Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),\n nn.Conv2d(dim * 4, dim_out, 1)\n )\n\nclass SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)\n emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')\n return torch.cat((emb.sin(), emb.cos()), dim = -1)\n\nclass LearnedSinusoidalPosEmb(nn.Module):\n \"\"\" following @crowsonkb 's lead with learned sinusoidal pos emb \"\"\"\n \"\"\" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n assert (dim % 2) == 0\n half_dim = dim // 2\n self.weights = nn.Parameter(torch.randn(half_dim))\n\n def forward(self, x):\n x = rearrange(x, 'b -> b 1')\n freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi\n fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)\n fouriered = torch.cat((x, fouriered), dim = -1)\n return fouriered\n\nclass Block(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n groups = 8,\n norm = True\n ):\n super().__init__()\n self.groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()\n self.activation = nn.SiLU()\n self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)\n\n def forward(self, x, scale_shift = None):\n x = self.groupnorm(x)\n\n if exists(scale_shift):\n scale, shift = scale_shift\n x = x * (scale + 1) + shift\n\n x = 
self.activation(x)\n return self.project(x)\n\nclass ResnetBlock(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n *,\n cond_dim = None,\n time_cond_dim = None,\n groups = 8,\n linear_attn = False,\n use_gca = False,\n squeeze_excite = False,\n **attn_kwargs\n ):\n super().__init__()\n\n self.time_mlp = None\n\n if exists(time_cond_dim):\n self.time_mlp = nn.Sequential(\n nn.SiLU(),\n nn.Linear(time_cond_dim, dim_out * 2)\n )\n\n self.cross_attn = None\n\n if exists(cond_dim):\n attn_klass = CrossAttention if not linear_attn else LinearCrossAttention\n\n self.cross_attn = attn_klass(\n dim = dim_out,\n context_dim = cond_dim,\n **attn_kwargs\n )\n\n self.block1 = Block(dim, dim_out, groups = groups)\n self.block2 = Block(dim_out, dim_out, groups = groups)\n\n self.gca = GlobalContext(dim_in = dim_out, dim_out = dim_out) if use_gca else Always(1)\n\n self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else Identity()\n\n\n def forward(self, x, time_emb = None, cond = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/30", "ground_truth": " scale_shift = None\n if exists(self.time_mlp) and exists(time_emb):\n time_emb = self.time_mlp(time_emb)\n time_emb = rearrange(time_emb, 'b c -> b c 1 1')\n scale_shift = time_emb.chunk(2, dim = 1)\n\n h = self.block1(x)\n\n if exists(self.cross_attn):\n assert exists(cond)\n h = rearrange(h, 'b c h w -> b h w c')\n h, ps = pack([h], 'b * c')\n h = self.cross_attn(h, context = cond) + h\n h, = unpack(h, ps, 'b * c')\n h = rearrange(h, 'b h w c -> b c h w')\n\n h = self.block2(h, scale_shift = scale_shift)\n\n h = h * self.gca(h)\n\n return h + self.res_conv(x)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 342, "lineno": 732, "function_name": "forward"}, "groundtruth": " scale_shift = None\n if exists(self.time_mlp) and exists(time_emb):\n time_emb = self.time_mlp(time_emb)\n time_emb = rearrange(time_emb, 'b c -> b c 1 1')\n scale_shift = time_emb.chunk(2, dim = 1)\n\n h = self.block1(x)\n\n if exists(self.cross_attn):\n assert exists(cond)\n h = rearrange(h, 'b c h w -> b h w c')\n h, ps = pack([h], 'b * c')\n h = self.cross_attn(h, context = cond) + h\n h, = unpack(h, ps, 'b * c')\n h = rearrange(h, 'b h w c -> b c h w')\n\n h = self.block2(h, scale_shift = scale_shift)\n\n h = h * self.gca(h)\n\n return h + self.res_conv(x)\n"} +{"prompt": " self.norm_latents = nn.LayerNorm(dim)\n\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n nn.LayerNorm(dim)\n )\n\n def forward(self, x, latents, mask = None):\n x = self.norm(x)\n latents = self.norm_latents(latents)\n\n b, h = x.shape[0], self.heads\n\n q = self.to_q(latents)\n\n # the paper differs from Perceiver in which they also concat the key / values derived from the latents to be attended to\n kv_input = torch.cat((x, latents), dim = -2)\n k, v = self.to_kv(kv_input).chunk(2, dim = -1)\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = h)\n\n # qk rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities and masking\n\n sim = einsum('... i d, ... j d -> ... 
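The ResnetBlock.forward ground truth above flattens feature maps into a token sequence for cross-attention using einops pack, then restores them with unpack. A minimal sketch of that round trip follows (the attention call is stubbed out, since only the reshaping is being illustrated); pack records the packed shape in ps so unpack can undo the flattening exactly.

import torch
from einops import rearrange, pack, unpack

h = torch.randn(2, 64, 8, 8)        # (b, c, h, w)
h = rearrange(h, 'b c h w -> b h w c')
tokens, ps = pack([h], 'b * c')     # (2, 64, 64): h*w tokens of width c
tokens = tokens + 0                 # stand-in for cross_attn(tokens, context=...)
h, = unpack(tokens, ps, 'b * c')    # restore the (h, w) grid
h = rearrange(h, 'b h w c -> b c h w')
assert h.shape == (2, 64, 8, 8)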
i j', q, k) * self.scale\n\n if exists(mask):\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = F.pad(mask, (0, latents.shape[-2]), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('... i j, ... j d -> ... i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)', h = h)\n return self.to_out(out)\n\nclass PerceiverResampler(nn.Module):\n def __init__(\n self,\n *,\n dim,\n depth,\n dim_head = 64,\n heads = 8,\n num_latents = 64,\n num_latents_mean_pooled = 4, # number of latents derived from mean pooled representation of the sequence\n max_seq_len = 512,\n ff_mult = 4\n ):\n super().__init__()\n self.pos_emb = nn.Embedding(max_seq_len, dim)\n\n self.latents = nn.Parameter(torch.randn(num_latents, dim))\n\n self.to_latents_from_mean_pooled_seq = None\n\n if num_latents_mean_pooled > 0:\n self.to_latents_from_mean_pooled_seq = nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, dim * num_latents_mean_pooled),\n Rearrange('b (n d) -> b n d', n = num_latents_mean_pooled)\n )\n\n self.layers = nn.ModuleList([])\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n PerceiverAttention(dim = dim, dim_head = dim_head, heads = heads),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, mask = None):\n n, device = x.shape[1], x.device\n pos_emb = self.pos_emb(torch.arange(n, device = device))\n\n x_with_pos = x + pos_emb\n\n latents = repeat(self.latents, 'n d -> b n d', b = x.shape[0])\n\n if exists(self.to_latents_from_mean_pooled_seq):\n meanpooled_seq = masked_mean(x, dim = 1, mask = torch.ones(x.shape[:2], device = x.device, dtype = torch.bool))\n meanpooled_latents = self.to_latents_from_mean_pooled_seq(meanpooled_seq)\n latents = torch.cat((meanpooled_latents, latents), dim = -2)\n\n for attn, ff in self.layers:\n latents = attn(x_with_pos, latents, mask = mask) + latents\n latents = ff(latents) + latents\n\n return latents\n\n# attention\n\nclass Attention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n dim_head = 64,\n heads = 8,\n context_dim = None,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n self.norm = LayerNorm(dim)\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, dim_head * 2)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context = None, mask = None, attn_bias = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n\n q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))\n\n q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b 1 d', b = b)\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # add text conditioning, if present\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n k = torch.cat((ck, k), dim = -2)\n v = torch.cat((cv, v), dim = -2)\n\n # qk 
rmsnorm\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # calculate query / key similarities\n\n sim = einsum('b h i d, b j d -> b h i j', q, k) * self.scale\n\n # relative positional encoding (T5 style)\n\n if exists(attn_bias):\n sim = sim + attn_bias\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n # attention\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n # aggregate values\n\n out = einsum('b h i j, b j d -> b h i d', attn, v)\n\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\n# decoder\n\ndef Upsample(dim, dim_out = None):\n dim_out = default(dim_out, dim)\n\n return nn.Sequential(\n nn.Upsample(scale_factor = 2, mode = 'nearest'),\n nn.Conv2d(dim, dim_out, 3, padding = 1)\n )\n\nclass PixelShuffleUpsample(nn.Module):\n \"\"\"\n code shared by @MalumaDev at DALLE2-pytorch for addressing checkboard artifacts\n https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf\n \"\"\"\n def __init__(self, dim, dim_out = None):\n super().__init__()\n dim_out = default(dim_out, dim)\n conv = nn.Conv2d(dim, dim_out * 4, 1)\n\n self.net = nn.Sequential(\n conv,\n nn.SiLU(),\n nn.PixelShuffle(2)\n )\n\n self.init_conv_(conv)\n\n def init_conv_(self, conv):\n o, i, h, w = conv.weight.shape\n conv_weight = torch.empty(o // 4, i, h, w)\n nn.init.kaiming_uniform_(conv_weight)\n conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')\n\n conv.weight.data.copy_(conv_weight)\n nn.init.zeros_(conv.bias.data)\n\n def forward(self, x):\n return self.net(x)\n\ndef Downsample(dim, dim_out = None):\n # https://arxiv.org/abs/2208.03641 shows this is the most optimal way to downsample\n # named SP-conv in the paper, but basically a pixel unshuffle\n dim_out = default(dim_out, dim)\n return nn.Sequential(\n Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),\n nn.Conv2d(dim * 4, dim_out, 1)\n )\n\nclass SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)\n emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')\n return torch.cat((emb.sin(), emb.cos()), dim = -1)\n\nclass LearnedSinusoidalPosEmb(nn.Module):\n \"\"\" following @crowsonkb 's lead with learned sinusoidal pos emb \"\"\"\n \"\"\" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n assert (dim % 2) == 0\n half_dim = dim // 2\n self.weights = nn.Parameter(torch.randn(half_dim))\n\n def forward(self, x):\n x = rearrange(x, 'b -> b 1')\n freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi\n fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)\n fouriered = torch.cat((x, fouriered), dim = -1)\n return fouriered\n\nclass Block(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n groups = 8,\n norm = True\n ):\n super().__init__()\n self.groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()\n self.activation = nn.SiLU()\n self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)\n\n def forward(self, x, scale_shift = None):\n x = self.groupnorm(x)\n\n if exists(scale_shift):\n scale, shift = scale_shift\n x = x * (scale + 1) + shift\n\n x = 
self.activation(x)\n return self.project(x)\n\nclass ResnetBlock(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n *,\n cond_dim = None,\n time_cond_dim = None,\n groups = 8,\n linear_attn = False,\n use_gca = False,\n squeeze_excite = False,\n **attn_kwargs\n ):\n super().__init__()\n\n self.time_mlp = None\n\n if exists(time_cond_dim):\n self.time_mlp = nn.Sequential(\n nn.SiLU(),\n nn.Linear(time_cond_dim, dim_out * 2)\n )\n\n self.cross_attn = None\n\n if exists(cond_dim):\n attn_klass = CrossAttention if not linear_attn else LinearCrossAttention\n\n self.cross_attn = attn_klass(\n dim = dim_out,\n context_dim = cond_dim,\n **attn_kwargs\n )\n\n self.block1 = Block(dim, dim_out, groups = groups)\n self.block2 = Block(dim_out, dim_out, groups = groups)\n\n self.gca = GlobalContext(dim_in = dim_out, dim_out = dim_out) if use_gca else Always(1)\n\n self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else Identity()\n\n\n def forward(self, x, time_emb = None, cond = None):\n\n scale_shift = None\n if exists(self.time_mlp) and exists(time_emb):\n time_emb = self.time_mlp(time_emb)\n time_emb = rearrange(time_emb, 'b c -> b c 1 1')\n scale_shift = time_emb.chunk(2, dim = 1)\n\n h = self.block1(x)\n\n if exists(self.cross_attn):\n assert exists(cond)\n h = rearrange(h, 'b c h w -> b h w c')\n h, ps = pack([h], 'b * c')\n h = self.cross_attn(h, context = cond) + h\n h, = unpack(h, ps, 'b * c')\n h = rearrange(h, 'b h w c -> b c h w')\n\n h = self.block2(h, scale_shift = scale_shift)\n\n h = h * self.gca(h)\n\n return h + self.res_conv(x)\n\nclass CrossAttention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n context_dim = None,\n dim_head = 64,\n heads = 8,\n norm_context = False,\n scale = 8\n ):", "metadata": {"task_id": "lucidrains--imagen-pytorch/31", "ground_truth": " super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n context_dim = default(context_dim, dim)\n\n self.norm = LayerNorm(dim)\n self.norm_context = LayerNorm(context_dim) if norm_context else Identity()\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 388, "lineno": 765, "function_name": "__init__"}, "groundtruth": " super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n context_dim = default(context_dim, dim)\n\n self.norm = LayerNorm(dim)\n self.norm_context = LayerNorm(context_dim) if norm_context else Identity()\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n"} +{"prompt": " -> b n (h d)')\n return self.to_out(out)\n\n# decoder\n\ndef Upsample(dim, dim_out = None):\n dim_out = default(dim_out, dim)\n\n return nn.Sequential(\n nn.Upsample(scale_factor = 2, mode = 'nearest'),\n nn.Conv2d(dim, dim_out, 3, padding = 1)\n )\n\nclass 
PixelShuffleUpsample(nn.Module):\n \"\"\"\n code shared by @MalumaDev at DALLE2-pytorch for addressing checkboard artifacts\n https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf\n \"\"\"\n def __init__(self, dim, dim_out = None):\n super().__init__()\n dim_out = default(dim_out, dim)\n conv = nn.Conv2d(dim, dim_out * 4, 1)\n\n self.net = nn.Sequential(\n conv,\n nn.SiLU(),\n nn.PixelShuffle(2)\n )\n\n self.init_conv_(conv)\n\n def init_conv_(self, conv):\n o, i, h, w = conv.weight.shape\n conv_weight = torch.empty(o // 4, i, h, w)\n nn.init.kaiming_uniform_(conv_weight)\n conv_weight = repeat(conv_weight, 'o ... -> (o 4) ...')\n\n conv.weight.data.copy_(conv_weight)\n nn.init.zeros_(conv.bias.data)\n\n def forward(self, x):\n return self.net(x)\n\ndef Downsample(dim, dim_out = None):\n # https://arxiv.org/abs/2208.03641 shows this is the most optimal way to downsample\n # named SP-conv in the paper, but basically a pixel unshuffle\n dim_out = default(dim_out, dim)\n return nn.Sequential(\n Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),\n nn.Conv2d(dim * 4, dim_out, 1)\n )\n\nclass SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)\n emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')\n return torch.cat((emb.sin(), emb.cos()), dim = -1)\n\nclass LearnedSinusoidalPosEmb(nn.Module):\n \"\"\" following @crowsonkb 's lead with learned sinusoidal pos emb \"\"\"\n \"\"\" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n assert (dim % 2) == 0\n half_dim = dim // 2\n self.weights = nn.Parameter(torch.randn(half_dim))\n\n def forward(self, x):\n x = rearrange(x, 'b -> b 1')\n freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi\n fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)\n fouriered = torch.cat((x, fouriered), dim = -1)\n return fouriered\n\nclass Block(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n groups = 8,\n norm = True\n ):\n super().__init__()\n self.groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()\n self.activation = nn.SiLU()\n self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)\n\n def forward(self, x, scale_shift = None):\n x = self.groupnorm(x)\n\n if exists(scale_shift):\n scale, shift = scale_shift\n x = x * (scale + 1) + shift\n\n x = self.activation(x)\n return self.project(x)\n\nclass ResnetBlock(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n *,\n cond_dim = None,\n time_cond_dim = None,\n groups = 8,\n linear_attn = False,\n use_gca = False,\n squeeze_excite = False,\n **attn_kwargs\n ):\n super().__init__()\n\n self.time_mlp = None\n\n if exists(time_cond_dim):\n self.time_mlp = nn.Sequential(\n nn.SiLU(),\n nn.Linear(time_cond_dim, dim_out * 2)\n )\n\n self.cross_attn = None\n\n if exists(cond_dim):\n attn_klass = CrossAttention if not linear_attn else LinearCrossAttention\n\n self.cross_attn = attn_klass(\n dim = dim_out,\n context_dim = cond_dim,\n **attn_kwargs\n )\n\n self.block1 = Block(dim, dim_out, groups = groups)\n self.block2 = Block(dim_out, dim_out, groups = groups)\n\n self.gca = GlobalContext(dim_in = dim_out, dim_out = dim_out) if use_gca else Always(1)\n\n self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else Identity()\n\n\n def forward(self, x, time_emb = 
None, cond = None):\n\n scale_shift = None\n if exists(self.time_mlp) and exists(time_emb):\n time_emb = self.time_mlp(time_emb)\n time_emb = rearrange(time_emb, 'b c -> b c 1 1')\n scale_shift = time_emb.chunk(2, dim = 1)\n\n h = self.block1(x)\n\n if exists(self.cross_attn):\n assert exists(cond)\n h = rearrange(h, 'b c h w -> b h w c')\n h, ps = pack([h], 'b * c')\n h = self.cross_attn(h, context = cond) + h\n h, = unpack(h, ps, 'b * c')\n h = rearrange(h, 'b h w c -> b c h w')\n\n h = self.block2(h, scale_shift = scale_shift)\n\n h = h * self.gca(h)\n\n return h + self.res_conv(x)\n\nclass CrossAttention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n context_dim = None,\n dim_head = 64,\n heads = 8,\n norm_context = False,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n context_dim = default(context_dim, dim)\n\n self.norm = LayerNorm(dim)\n self.norm_context = LayerNorm(context_dim) if norm_context else Identity()\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b h 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # cosine sim attention\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities\n\n sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\nclass LinearCrossAttention(CrossAttention):\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> (b h) n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> (b h) 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # masking\n\n max_neg_value = -torch.finfo(x.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b n -> b n 1')\n k = k.masked_fill(~mask, max_neg_value)\n v = v.masked_fill(~mask, 0.)\n\n # linear attention\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', 
q, context)\n out = rearrange(out, '(b h) n d -> b n (h d)', h = self.heads)\n return self.to_out(out)\n\nclass LinearAttention(nn.Module):\n def __init__(\n self,\n dim,\n dim_head = 32,\n heads = 8,\n dropout = 0.05,\n context_dim = None,\n **kwargs\n ):\n super().__init__()\n self.scale = dim_head ** -0.5\n self.heads = heads\n inner_dim = dim_head * heads\n self.norm = ChanLayerNorm(dim)\n\n self.nonlin = nn.SiLU()\n\n self.to_q = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_k = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_v = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, inner_dim * 2, bias = False)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Conv2d(inner_dim, dim, 1, bias = False),\n ChanLayerNorm(dim)\n )\n\n def forward(self, fmap, context = None):\n h, x, y = self.heads, *fmap.shape[-2:]\n\n fmap = self.norm(fmap)\n q, k, v = map(lambda fn: fn(fmap), (self.to_q, self.to_k, self.to_v))\n q, k, v = rearrange_many((q, k, v), 'b (h c) x y -> (b h) (x y) c', h = h)\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n ck, cv = rearrange_many((ck, cv), 'b n (h d) -> (b h) n d', h = h)\n k = torch.cat((k, ck), dim = -2)\n v = torch.cat((v, cv), dim = -2)\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', q, context)\n out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)\n\n out = self.nonlin(out)\n return self.to_out(out)\n\nclass GlobalContext(nn.Module):\n \"\"\" basically a superior form of squeeze-excitation that is attention-esque \"\"\"\n\n def __init__(\n self,\n *,\n dim_in,\n dim_out\n ):", "metadata": {"task_id": "lucidrains--imagen-pytorch/32", "ground_truth": " super().__init__()\n self.to_k = nn.Conv2d(dim_in, 1, 1)\n hidden_dim = max(3, dim_out // 2)\n\n self.net = nn.Sequential(\n nn.Conv2d(dim_in, hidden_dim, 1),\n nn.SiLU(),\n nn.Conv2d(hidden_dim, dim_out, 1),\n nn.Sigmoid()\n )\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 583, "lineno": 949, "function_name": "__init__"}, "groundtruth": " super().__init__()\n self.to_k = nn.Conv2d(dim_in, 1, 1)\n hidden_dim = max(3, dim_out // 2)\n\n self.net = nn.Sequential(\n nn.Conv2d(dim_in, hidden_dim, 1),\n nn.SiLU(),\n nn.Conv2d(hidden_dim, dim_out, 1),\n nn.Sigmoid()\n )\n"} +{"prompt": " @MalumaDev at DALLE2-pytorch for addressing checkboard artifacts\n https://arxiv.org/ftp/arxiv/papers/1707/1707.02937.pdf\n \"\"\"\n def __init__(self, dim, dim_out = None):\n super().__init__()\n dim_out = default(dim_out, dim)\n conv = nn.Conv2d(dim, dim_out * 4, 1)\n\n self.net = nn.Sequential(\n conv,\n nn.SiLU(),\n nn.PixelShuffle(2)\n )\n\n self.init_conv_(conv)\n\n def init_conv_(self, conv):\n o, i, h, w = conv.weight.shape\n conv_weight = torch.empty(o // 4, i, h, w)\n nn.init.kaiming_uniform_(conv_weight)\n conv_weight = repeat(conv_weight, 'o ... 
-> (o 4) ...')\n\n conv.weight.data.copy_(conv_weight)\n nn.init.zeros_(conv.bias.data)\n\n def forward(self, x):\n return self.net(x)\n\ndef Downsample(dim, dim_out = None):\n # https://arxiv.org/abs/2208.03641 shows this is the most optimal way to downsample\n # named SP-conv in the paper, but basically a pixel unshuffle\n dim_out = default(dim_out, dim)\n return nn.Sequential(\n Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),\n nn.Conv2d(dim * 4, dim_out, 1)\n )\n\nclass SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)\n emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')\n return torch.cat((emb.sin(), emb.cos()), dim = -1)\n\nclass LearnedSinusoidalPosEmb(nn.Module):\n \"\"\" following @crowsonkb 's lead with learned sinusoidal pos emb \"\"\"\n \"\"\" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n assert (dim % 2) == 0\n half_dim = dim // 2\n self.weights = nn.Parameter(torch.randn(half_dim))\n\n def forward(self, x):\n x = rearrange(x, 'b -> b 1')\n freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi\n fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)\n fouriered = torch.cat((x, fouriered), dim = -1)\n return fouriered\n\nclass Block(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n groups = 8,\n norm = True\n ):\n super().__init__()\n self.groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()\n self.activation = nn.SiLU()\n self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)\n\n def forward(self, x, scale_shift = None):\n x = self.groupnorm(x)\n\n if exists(scale_shift):\n scale, shift = scale_shift\n x = x * (scale + 1) + shift\n\n x = self.activation(x)\n return self.project(x)\n\nclass ResnetBlock(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n *,\n cond_dim = None,\n time_cond_dim = None,\n groups = 8,\n linear_attn = False,\n use_gca = False,\n squeeze_excite = False,\n **attn_kwargs\n ):\n super().__init__()\n\n self.time_mlp = None\n\n if exists(time_cond_dim):\n self.time_mlp = nn.Sequential(\n nn.SiLU(),\n nn.Linear(time_cond_dim, dim_out * 2)\n )\n\n self.cross_attn = None\n\n if exists(cond_dim):\n attn_klass = CrossAttention if not linear_attn else LinearCrossAttention\n\n self.cross_attn = attn_klass(\n dim = dim_out,\n context_dim = cond_dim,\n **attn_kwargs\n )\n\n self.block1 = Block(dim, dim_out, groups = groups)\n self.block2 = Block(dim_out, dim_out, groups = groups)\n\n self.gca = GlobalContext(dim_in = dim_out, dim_out = dim_out) if use_gca else Always(1)\n\n self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else Identity()\n\n\n def forward(self, x, time_emb = None, cond = None):\n\n scale_shift = None\n if exists(self.time_mlp) and exists(time_emb):\n time_emb = self.time_mlp(time_emb)\n time_emb = rearrange(time_emb, 'b c -> b c 1 1')\n scale_shift = time_emb.chunk(2, dim = 1)\n\n h = self.block1(x)\n\n if exists(self.cross_attn):\n assert exists(cond)\n h = rearrange(h, 'b c h w -> b h w c')\n h, ps = pack([h], 'b * c')\n h = self.cross_attn(h, context = cond) + h\n h, = unpack(h, ps, 'b * c')\n h = rearrange(h, 'b h w c -> b c h w')\n\n h = self.block2(h, scale_shift = scale_shift)\n\n h = h * self.gca(h)\n\n return h + self.res_conv(x)\n\nclass 
CrossAttention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n context_dim = None,\n dim_head = 64,\n heads = 8,\n norm_context = False,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n context_dim = default(context_dim, dim)\n\n self.norm = LayerNorm(dim)\n self.norm_context = LayerNorm(context_dim) if norm_context else Identity()\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b h 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # cosine sim attention\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities\n\n sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\nclass LinearCrossAttention(CrossAttention):\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> (b h) n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> (b h) 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # masking\n\n max_neg_value = -torch.finfo(x.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b n -> b n 1')\n k = k.masked_fill(~mask, max_neg_value)\n v = v.masked_fill(~mask, 0.)\n\n # linear attention\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', q, context)\n out = rearrange(out, '(b h) n d -> b n (h d)', h = self.heads)\n return self.to_out(out)\n\nclass LinearAttention(nn.Module):\n def __init__(\n self,\n dim,\n dim_head = 32,\n heads = 8,\n dropout = 0.05,\n context_dim = None,\n **kwargs\n ):\n super().__init__()\n self.scale = dim_head ** -0.5\n self.heads = heads\n inner_dim = dim_head * heads\n self.norm = ChanLayerNorm(dim)\n\n self.nonlin = nn.SiLU()\n\n self.to_q = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = 
inner_dim)\n )\n\n self.to_k = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_v = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, inner_dim * 2, bias = False)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Conv2d(inner_dim, dim, 1, bias = False),\n ChanLayerNorm(dim)\n )\n\n def forward(self, fmap, context = None):\n h, x, y = self.heads, *fmap.shape[-2:]\n\n fmap = self.norm(fmap)\n q, k, v = map(lambda fn: fn(fmap), (self.to_q, self.to_k, self.to_v))\n q, k, v = rearrange_many((q, k, v), 'b (h c) x y -> (b h) (x y) c', h = h)\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n ck, cv = rearrange_many((ck, cv), 'b n (h d) -> (b h) n d', h = h)\n k = torch.cat((k, ck), dim = -2)\n v = torch.cat((v, cv), dim = -2)\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', q, context)\n out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)\n\n out = self.nonlin(out)\n return self.to_out(out)\n\nclass GlobalContext(nn.Module):\n \"\"\" basically a superior form of squeeze-excitation that is attention-esque \"\"\"\n\n def __init__(\n self,\n *,\n dim_in,\n dim_out\n ):\n super().__init__()\n self.to_k = nn.Conv2d(dim_in, 1, 1)\n hidden_dim = max(3, dim_out // 2)\n\n self.net = nn.Sequential(\n nn.Conv2d(dim_in, hidden_dim, 1),\n nn.SiLU(),\n nn.Conv2d(hidden_dim, dim_out, 1),\n nn.Sigmoid()\n )\n\n def forward(self, x):", "metadata": {"task_id": "lucidrains--imagen-pytorch/33", "ground_truth": " context = self.to_k(x)\n x, context = rearrange_many((x, context), 'b n ... -> b n (...)')\n out = einsum('b i n, b c n -> b c i', context.softmax(dim = -1), x)\n out = rearrange(out, '... -> ... 1')\n return self.net(out)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 598, "lineno": 961, "function_name": "forward"}, "groundtruth": " context = self.to_k(x)\n x, context = rearrange_many((x, context), 'b n ... -> b n (...)')\n out = einsum('b i n, b c n -> b c i', context.softmax(dim = -1), x)\n out = rearrange(out, '... -> ... 1')\n return self.net(out)\n"} +{"prompt": "out * 4, 1)\n\n self.net = nn.Sequential(\n conv,\n nn.SiLU(),\n nn.PixelShuffle(2)\n )\n\n self.init_conv_(conv)\n\n def init_conv_(self, conv):\n o, i, h, w = conv.weight.shape\n conv_weight = torch.empty(o // 4, i, h, w)\n nn.init.kaiming_uniform_(conv_weight)\n conv_weight = repeat(conv_weight, 'o ... 
-> (o 4) ...')\n\n conv.weight.data.copy_(conv_weight)\n nn.init.zeros_(conv.bias.data)\n\n def forward(self, x):\n return self.net(x)\n\ndef Downsample(dim, dim_out = None):\n # https://arxiv.org/abs/2208.03641 shows this is the most optimal way to downsample\n # named SP-conv in the paper, but basically a pixel unshuffle\n dim_out = default(dim_out, dim)\n return nn.Sequential(\n Rearrange('b c (h s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),\n nn.Conv2d(dim * 4, dim_out, 1)\n )\n\nclass SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)\n emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')\n return torch.cat((emb.sin(), emb.cos()), dim = -1)\n\nclass LearnedSinusoidalPosEmb(nn.Module):\n \"\"\" following @crowsonkb 's lead with learned sinusoidal pos emb \"\"\"\n \"\"\" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n assert (dim % 2) == 0\n half_dim = dim // 2\n self.weights = nn.Parameter(torch.randn(half_dim))\n\n def forward(self, x):\n x = rearrange(x, 'b -> b 1')\n freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi\n fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)\n fouriered = torch.cat((x, fouriered), dim = -1)\n return fouriered\n\nclass Block(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n groups = 8,\n norm = True\n ):\n super().__init__()\n self.groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()\n self.activation = nn.SiLU()\n self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)\n\n def forward(self, x, scale_shift = None):\n x = self.groupnorm(x)\n\n if exists(scale_shift):\n scale, shift = scale_shift\n x = x * (scale + 1) + shift\n\n x = self.activation(x)\n return self.project(x)\n\nclass ResnetBlock(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n *,\n cond_dim = None,\n time_cond_dim = None,\n groups = 8,\n linear_attn = False,\n use_gca = False,\n squeeze_excite = False,\n **attn_kwargs\n ):\n super().__init__()\n\n self.time_mlp = None\n\n if exists(time_cond_dim):\n self.time_mlp = nn.Sequential(\n nn.SiLU(),\n nn.Linear(time_cond_dim, dim_out * 2)\n )\n\n self.cross_attn = None\n\n if exists(cond_dim):\n attn_klass = CrossAttention if not linear_attn else LinearCrossAttention\n\n self.cross_attn = attn_klass(\n dim = dim_out,\n context_dim = cond_dim,\n **attn_kwargs\n )\n\n self.block1 = Block(dim, dim_out, groups = groups)\n self.block2 = Block(dim_out, dim_out, groups = groups)\n\n self.gca = GlobalContext(dim_in = dim_out, dim_out = dim_out) if use_gca else Always(1)\n\n self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else Identity()\n\n\n def forward(self, x, time_emb = None, cond = None):\n\n scale_shift = None\n if exists(self.time_mlp) and exists(time_emb):\n time_emb = self.time_mlp(time_emb)\n time_emb = rearrange(time_emb, 'b c -> b c 1 1')\n scale_shift = time_emb.chunk(2, dim = 1)\n\n h = self.block1(x)\n\n if exists(self.cross_attn):\n assert exists(cond)\n h = rearrange(h, 'b c h w -> b h w c')\n h, ps = pack([h], 'b * c')\n h = self.cross_attn(h, context = cond) + h\n h, = unpack(h, ps, 'b * c')\n h = rearrange(h, 'b h w c -> b c h w')\n\n h = self.block2(h, scale_shift = scale_shift)\n\n h = h * self.gca(h)\n\n return h + self.res_conv(x)\n\nclass 
CrossAttention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n context_dim = None,\n dim_head = 64,\n heads = 8,\n norm_context = False,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n context_dim = default(context_dim, dim)\n\n self.norm = LayerNorm(dim)\n self.norm_context = LayerNorm(context_dim) if norm_context else Identity()\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b h 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # cosine sim attention\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities\n\n sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\nclass LinearCrossAttention(CrossAttention):\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> (b h) n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> (b h) 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # masking\n\n max_neg_value = -torch.finfo(x.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b n -> b n 1')\n k = k.masked_fill(~mask, max_neg_value)\n v = v.masked_fill(~mask, 0.)\n\n # linear attention\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', q, context)\n out = rearrange(out, '(b h) n d -> b n (h d)', h = self.heads)\n return self.to_out(out)\n\nclass LinearAttention(nn.Module):\n def __init__(\n self,\n dim,\n dim_head = 32,\n heads = 8,\n dropout = 0.05,\n context_dim = None,\n **kwargs\n ):\n super().__init__()\n self.scale = dim_head ** -0.5\n self.heads = heads\n inner_dim = dim_head * heads\n self.norm = ChanLayerNorm(dim)\n\n self.nonlin = nn.SiLU()\n\n self.to_q = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = 
inner_dim)\n )\n\n self.to_k = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_v = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, inner_dim * 2, bias = False)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Conv2d(inner_dim, dim, 1, bias = False),\n ChanLayerNorm(dim)\n )\n\n def forward(self, fmap, context = None):\n h, x, y = self.heads, *fmap.shape[-2:]\n\n fmap = self.norm(fmap)\n q, k, v = map(lambda fn: fn(fmap), (self.to_q, self.to_k, self.to_v))\n q, k, v = rearrange_many((q, k, v), 'b (h c) x y -> (b h) (x y) c', h = h)\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n ck, cv = rearrange_many((ck, cv), 'b n (h d) -> (b h) n d', h = h)\n k = torch.cat((k, ck), dim = -2)\n v = torch.cat((v, cv), dim = -2)\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', q, context)\n out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)\n\n out = self.nonlin(out)\n return self.to_out(out)\n\nclass GlobalContext(nn.Module):\n \"\"\" basically a superior form of squeeze-excitation that is attention-esque \"\"\"\n\n def __init__(\n self,\n *,\n dim_in,\n dim_out\n ):\n super().__init__()\n self.to_k = nn.Conv2d(dim_in, 1, 1)\n hidden_dim = max(3, dim_out // 2)\n\n self.net = nn.Sequential(\n nn.Conv2d(dim_in, hidden_dim, 1),\n nn.SiLU(),\n nn.Conv2d(hidden_dim, dim_out, 1),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n context = self.to_k(x)\n x, context = rearrange_many((x, context), 'b n ... -> b n (...)')\n out = einsum('b i n, b c n -> b c i', context.softmax(dim = -1), x)\n out = rearrange(out, '... -> ... 
1')\n return self.net(out)\n\ndef FeedForward(dim, mult = 2):", "metadata": {"task_id": "lucidrains--imagen-pytorch/34", "ground_truth": " hidden_dim = int(dim * mult)\n return nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, hidden_dim, bias = False),\n nn.GELU(),\n LayerNorm(hidden_dim),\n nn.Linear(hidden_dim, dim, bias = False)\n )\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 604, "lineno": 968, "function_name": "FeedForward"}, "groundtruth": " hidden_dim = int(dim * mult)\n return nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, hidden_dim, bias = False),\n nn.GELU(),\n LayerNorm(hidden_dim),\n nn.Linear(hidden_dim, dim, bias = False)\n )\n"} +{"prompt": " s1) (w s2) -> b (c s1 s2) h w', s1 = 2, s2 = 2),\n nn.Conv2d(dim * 4, dim_out, 1)\n )\n\nclass SinusoidalPosEmb(nn.Module):\n def __init__(self, dim):\n super().__init__()\n self.dim = dim\n\n def forward(self, x):\n half_dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)\n emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')\n return torch.cat((emb.sin(), emb.cos()), dim = -1)\n\nclass LearnedSinusoidalPosEmb(nn.Module):\n \"\"\" following @crowsonkb 's lead with learned sinusoidal pos emb \"\"\"\n \"\"\" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n assert (dim % 2) == 0\n half_dim = dim // 2\n self.weights = nn.Parameter(torch.randn(half_dim))\n\n def forward(self, x):\n x = rearrange(x, 'b -> b 1')\n freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi\n fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)\n fouriered = torch.cat((x, fouriered), dim = -1)\n return fouriered\n\nclass Block(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n groups = 8,\n norm = True\n ):\n super().__init__()\n self.groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()\n self.activation = nn.SiLU()\n self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)\n\n def forward(self, x, scale_shift = None):\n x = self.groupnorm(x)\n\n if exists(scale_shift):\n scale, shift = scale_shift\n x = x * (scale + 1) + shift\n\n x = self.activation(x)\n return self.project(x)\n\nclass ResnetBlock(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n *,\n cond_dim = None,\n time_cond_dim = None,\n groups = 8,\n linear_attn = False,\n use_gca = False,\n squeeze_excite = False,\n **attn_kwargs\n ):\n super().__init__()\n\n self.time_mlp = None\n\n if exists(time_cond_dim):\n self.time_mlp = nn.Sequential(\n nn.SiLU(),\n nn.Linear(time_cond_dim, dim_out * 2)\n )\n\n self.cross_attn = None\n\n if exists(cond_dim):\n attn_klass = CrossAttention if not linear_attn else LinearCrossAttention\n\n self.cross_attn = attn_klass(\n dim = dim_out,\n context_dim = cond_dim,\n **attn_kwargs\n )\n\n self.block1 = Block(dim, dim_out, groups = groups)\n self.block2 = Block(dim_out, dim_out, groups = groups)\n\n self.gca = GlobalContext(dim_in = dim_out, dim_out = dim_out) if use_gca else Always(1)\n\n self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else Identity()\n\n\n def forward(self, x, time_emb = None, cond = None):\n\n scale_shift = None\n if exists(self.time_mlp) and exists(time_emb):\n time_emb = self.time_mlp(time_emb)\n time_emb = rearrange(time_emb, 'b c -> b c 1 1')\n scale_shift = time_emb.chunk(2, dim = 1)\n\n h = self.block1(x)\n\n if exists(self.cross_attn):\n assert 
exists(cond)\n h = rearrange(h, 'b c h w -> b h w c')\n h, ps = pack([h], 'b * c')\n h = self.cross_attn(h, context = cond) + h\n h, = unpack(h, ps, 'b * c')\n h = rearrange(h, 'b h w c -> b c h w')\n\n h = self.block2(h, scale_shift = scale_shift)\n\n h = h * self.gca(h)\n\n return h + self.res_conv(x)\n\nclass CrossAttention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n context_dim = None,\n dim_head = 64,\n heads = 8,\n norm_context = False,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n context_dim = default(context_dim, dim)\n\n self.norm = LayerNorm(dim)\n self.norm_context = LayerNorm(context_dim) if norm_context else Identity()\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b h 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # cosine sim attention\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities\n\n sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\nclass LinearCrossAttention(CrossAttention):\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> (b h) n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> (b h) 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # masking\n\n max_neg_value = -torch.finfo(x.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b n -> b n 1')\n k = k.masked_fill(~mask, max_neg_value)\n v = v.masked_fill(~mask, 0.)\n\n # linear attention\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', q, context)\n out = rearrange(out, '(b h) n d -> b n (h d)', h = self.heads)\n return self.to_out(out)\n\nclass LinearAttention(nn.Module):\n def __init__(\n self,\n dim,\n dim_head = 32,\n heads = 8,\n dropout = 0.05,\n context_dim = None,\n **kwargs\n ):\n super().__init__()\n 
self.scale = dim_head ** -0.5\n self.heads = heads\n inner_dim = dim_head * heads\n self.norm = ChanLayerNorm(dim)\n\n self.nonlin = nn.SiLU()\n\n self.to_q = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_k = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_v = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, inner_dim * 2, bias = False)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Conv2d(inner_dim, dim, 1, bias = False),\n ChanLayerNorm(dim)\n )\n\n def forward(self, fmap, context = None):\n h, x, y = self.heads, *fmap.shape[-2:]\n\n fmap = self.norm(fmap)\n q, k, v = map(lambda fn: fn(fmap), (self.to_q, self.to_k, self.to_v))\n q, k, v = rearrange_many((q, k, v), 'b (h c) x y -> (b h) (x y) c', h = h)\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n ck, cv = rearrange_many((ck, cv), 'b n (h d) -> (b h) n d', h = h)\n k = torch.cat((k, ck), dim = -2)\n v = torch.cat((v, cv), dim = -2)\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', q, context)\n out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)\n\n out = self.nonlin(out)\n return self.to_out(out)\n\nclass GlobalContext(nn.Module):\n \"\"\" basically a superior form of squeeze-excitation that is attention-esque \"\"\"\n\n def __init__(\n self,\n *,\n dim_in,\n dim_out\n ):\n super().__init__()\n self.to_k = nn.Conv2d(dim_in, 1, 1)\n hidden_dim = max(3, dim_out // 2)\n\n self.net = nn.Sequential(\n nn.Conv2d(dim_in, hidden_dim, 1),\n nn.SiLU(),\n nn.Conv2d(hidden_dim, dim_out, 1),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n context = self.to_k(x)\n x, context = rearrange_many((x, context), 'b n ... -> b n (...)')\n out = einsum('b i n, b c n -> b c i', context.softmax(dim = -1), x)\n out = rearrange(out, '... -> ... 
1')\n return self.net(out)\n\ndef FeedForward(dim, mult = 2):\n hidden_dim = int(dim * mult)\n return nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, hidden_dim, bias = False),\n nn.GELU(),\n LayerNorm(hidden_dim),\n nn.Linear(hidden_dim, dim, bias = False)\n )\n\ndef ChanFeedForward(dim, mult = 2): # in paper, it seems for self attention layers they did feedforwards with twice channel width\n hidden_dim = int(dim * mult)\n return nn.Sequential(\n ChanLayerNorm(dim),\n nn.Conv2d(dim, hidden_dim, 1, bias = False),\n nn.GELU(),\n ChanLayerNorm(hidden_dim),\n nn.Conv2d(hidden_dim, dim, 1, bias = False)\n )\n\nclass TransformerBlock(nn.Module):\n def __init__(\n self,\n dim,\n *,\n depth = 1,\n heads = 8,\n dim_head = 32,\n ff_mult = 2,\n context_dim = None\n ):", "metadata": {"task_id": "lucidrains--imagen-pytorch/35", "ground_truth": " super().__init__()\n self.layers = nn.ModuleList([])\n\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n Attention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 631, "lineno": 998, "function_name": "__init__"}, "groundtruth": " super().__init__()\n self.layers = nn.ModuleList([])\n\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n Attention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n"} +{"prompt": "dim = self.dim // 2\n emb = math.log(10000) / (half_dim - 1)\n emb = torch.exp(torch.arange(half_dim, device = x.device) * -emb)\n emb = rearrange(x, 'i -> i 1') * rearrange(emb, 'j -> 1 j')\n return torch.cat((emb.sin(), emb.cos()), dim = -1)\n\nclass LearnedSinusoidalPosEmb(nn.Module):\n \"\"\" following @crowsonkb 's lead with learned sinusoidal pos emb \"\"\"\n \"\"\" https://github.com/crowsonkb/v-diffusion-jax/blob/master/diffusion/models/danbooru_128.py#L8 \"\"\"\n\n def __init__(self, dim):\n super().__init__()\n assert (dim % 2) == 0\n half_dim = dim // 2\n self.weights = nn.Parameter(torch.randn(half_dim))\n\n def forward(self, x):\n x = rearrange(x, 'b -> b 1')\n freqs = x * rearrange(self.weights, 'd -> 1 d') * 2 * math.pi\n fouriered = torch.cat((freqs.sin(), freqs.cos()), dim = -1)\n fouriered = torch.cat((x, fouriered), dim = -1)\n return fouriered\n\nclass Block(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n groups = 8,\n norm = True\n ):\n super().__init__()\n self.groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()\n self.activation = nn.SiLU()\n self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)\n\n def forward(self, x, scale_shift = None):\n x = self.groupnorm(x)\n\n if exists(scale_shift):\n scale, shift = scale_shift\n x = x * (scale + 1) + shift\n\n x = self.activation(x)\n return self.project(x)\n\nclass ResnetBlock(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n *,\n cond_dim = None,\n time_cond_dim = None,\n groups = 8,\n linear_attn = False,\n use_gca = False,\n squeeze_excite = False,\n **attn_kwargs\n ):\n super().__init__()\n\n self.time_mlp = None\n\n if exists(time_cond_dim):\n self.time_mlp = nn.Sequential(\n nn.SiLU(),\n nn.Linear(time_cond_dim, dim_out * 2)\n )\n\n self.cross_attn = None\n\n if exists(cond_dim):\n attn_klass = CrossAttention if not linear_attn else LinearCrossAttention\n\n self.cross_attn = attn_klass(\n dim = dim_out,\n context_dim = cond_dim,\n **attn_kwargs\n )\n\n self.block1 = 
Block(dim, dim_out, groups = groups)\n self.block2 = Block(dim_out, dim_out, groups = groups)\n\n self.gca = GlobalContext(dim_in = dim_out, dim_out = dim_out) if use_gca else Always(1)\n\n self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else Identity()\n\n\n def forward(self, x, time_emb = None, cond = None):\n\n scale_shift = None\n if exists(self.time_mlp) and exists(time_emb):\n time_emb = self.time_mlp(time_emb)\n time_emb = rearrange(time_emb, 'b c -> b c 1 1')\n scale_shift = time_emb.chunk(2, dim = 1)\n\n h = self.block1(x)\n\n if exists(self.cross_attn):\n assert exists(cond)\n h = rearrange(h, 'b c h w -> b h w c')\n h, ps = pack([h], 'b * c')\n h = self.cross_attn(h, context = cond) + h\n h, = unpack(h, ps, 'b * c')\n h = rearrange(h, 'b h w c -> b c h w')\n\n h = self.block2(h, scale_shift = scale_shift)\n\n h = h * self.gca(h)\n\n return h + self.res_conv(x)\n\nclass CrossAttention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n context_dim = None,\n dim_head = 64,\n heads = 8,\n norm_context = False,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n context_dim = default(context_dim, dim)\n\n self.norm = LayerNorm(dim)\n self.norm_context = LayerNorm(context_dim) if norm_context else Identity()\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b h 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # cosine sim attention\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities\n\n sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\nclass LinearCrossAttention(CrossAttention):\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> (b h) n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> (b h) 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # masking\n\n max_neg_value = -torch.finfo(x.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n 
mask = rearrange(mask, 'b n -> b n 1')\n k = k.masked_fill(~mask, max_neg_value)\n v = v.masked_fill(~mask, 0.)\n\n # linear attention\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', q, context)\n out = rearrange(out, '(b h) n d -> b n (h d)', h = self.heads)\n return self.to_out(out)\n\nclass LinearAttention(nn.Module):\n def __init__(\n self,\n dim,\n dim_head = 32,\n heads = 8,\n dropout = 0.05,\n context_dim = None,\n **kwargs\n ):\n super().__init__()\n self.scale = dim_head ** -0.5\n self.heads = heads\n inner_dim = dim_head * heads\n self.norm = ChanLayerNorm(dim)\n\n self.nonlin = nn.SiLU()\n\n self.to_q = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_k = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_v = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, inner_dim * 2, bias = False)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Conv2d(inner_dim, dim, 1, bias = False),\n ChanLayerNorm(dim)\n )\n\n def forward(self, fmap, context = None):\n h, x, y = self.heads, *fmap.shape[-2:]\n\n fmap = self.norm(fmap)\n q, k, v = map(lambda fn: fn(fmap), (self.to_q, self.to_k, self.to_v))\n q, k, v = rearrange_many((q, k, v), 'b (h c) x y -> (b h) (x y) c', h = h)\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n ck, cv = rearrange_many((ck, cv), 'b n (h d) -> (b h) n d', h = h)\n k = torch.cat((k, ck), dim = -2)\n v = torch.cat((v, cv), dim = -2)\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', q, context)\n out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)\n\n out = self.nonlin(out)\n return self.to_out(out)\n\nclass GlobalContext(nn.Module):\n \"\"\" basically a superior form of squeeze-excitation that is attention-esque \"\"\"\n\n def __init__(\n self,\n *,\n dim_in,\n dim_out\n ):\n super().__init__()\n self.to_k = nn.Conv2d(dim_in, 1, 1)\n hidden_dim = max(3, dim_out // 2)\n\n self.net = nn.Sequential(\n nn.Conv2d(dim_in, hidden_dim, 1),\n nn.SiLU(),\n nn.Conv2d(hidden_dim, dim_out, 1),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n context = self.to_k(x)\n x, context = rearrange_many((x, context), 'b n ... -> b n (...)')\n out = einsum('b i n, b c n -> b c i', context.softmax(dim = -1), x)\n out = rearrange(out, '... -> ... 
1')\n return self.net(out)\n\ndef FeedForward(dim, mult = 2):\n hidden_dim = int(dim * mult)\n return nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, hidden_dim, bias = False),\n nn.GELU(),\n LayerNorm(hidden_dim),\n nn.Linear(hidden_dim, dim, bias = False)\n )\n\ndef ChanFeedForward(dim, mult = 2): # in paper, it seems for self attention layers they did feedforwards with twice channel width\n hidden_dim = int(dim * mult)\n return nn.Sequential(\n ChanLayerNorm(dim),\n nn.Conv2d(dim, hidden_dim, 1, bias = False),\n nn.GELU(),\n ChanLayerNorm(hidden_dim),\n nn.Conv2d(hidden_dim, dim, 1, bias = False)\n )\n\nclass TransformerBlock(nn.Module):\n def __init__(\n self,\n dim,\n *,\n depth = 1,\n heads = 8,\n dim_head = 32,\n ff_mult = 2,\n context_dim = None\n ):\n super().__init__()\n self.layers = nn.ModuleList([])\n\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n Attention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, context = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/36", "ground_truth": " x = rearrange(x, 'b c h w -> b h w c')\n x, ps = pack([x], 'b * c')\n\n for attn, ff in self.layers:\n x = attn(x, context = context) + x\n x = ff(x) + x\n\n x, = unpack(x, ps, 'b * c')\n x = rearrange(x, 'b h w c -> b c h w')\n return x\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 641, "lineno": 1008, "function_name": "forward"}, "groundtruth": " x = rearrange(x, 'b c h w -> b h w c')\n x, ps = pack([x], 'b * c')\n\n for attn, ff in self.layers:\n x = attn(x, context = context) + x\n x = ff(x) + x\n\n x, = unpack(x, ps, 'b * c')\n x = rearrange(x, 'b h w c -> b c h w')\n return x\n"} +{"prompt": ".groupnorm = nn.GroupNorm(groups, dim) if norm else Identity()\n self.activation = nn.SiLU()\n self.project = nn.Conv2d(dim, dim_out, 3, padding = 1)\n\n def forward(self, x, scale_shift = None):\n x = self.groupnorm(x)\n\n if exists(scale_shift):\n scale, shift = scale_shift\n x = x * (scale + 1) + shift\n\n x = self.activation(x)\n return self.project(x)\n\nclass ResnetBlock(nn.Module):\n def __init__(\n self,\n dim,\n dim_out,\n *,\n cond_dim = None,\n time_cond_dim = None,\n groups = 8,\n linear_attn = False,\n use_gca = False,\n squeeze_excite = False,\n **attn_kwargs\n ):\n super().__init__()\n\n self.time_mlp = None\n\n if exists(time_cond_dim):\n self.time_mlp = nn.Sequential(\n nn.SiLU(),\n nn.Linear(time_cond_dim, dim_out * 2)\n )\n\n self.cross_attn = None\n\n if exists(cond_dim):\n attn_klass = CrossAttention if not linear_attn else LinearCrossAttention\n\n self.cross_attn = attn_klass(\n dim = dim_out,\n context_dim = cond_dim,\n **attn_kwargs\n )\n\n self.block1 = Block(dim, dim_out, groups = groups)\n self.block2 = Block(dim_out, dim_out, groups = groups)\n\n self.gca = GlobalContext(dim_in = dim_out, dim_out = dim_out) if use_gca else Always(1)\n\n self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else Identity()\n\n\n def forward(self, x, time_emb = None, cond = None):\n\n scale_shift = None\n if exists(self.time_mlp) and exists(time_emb):\n time_emb = self.time_mlp(time_emb)\n time_emb = rearrange(time_emb, 'b c -> b c 1 1')\n scale_shift = time_emb.chunk(2, dim = 1)\n\n h = self.block1(x)\n\n if exists(self.cross_attn):\n assert exists(cond)\n h = rearrange(h, 'b c h w -> b h w c')\n h, ps = pack([h], 'b * c')\n h = self.cross_attn(h, context = cond) + h\n h, = unpack(h, ps, 'b * 
c')\n h = rearrange(h, 'b h w c -> b c h w')\n\n h = self.block2(h, scale_shift = scale_shift)\n\n h = h * self.gca(h)\n\n return h + self.res_conv(x)\n\nclass CrossAttention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n context_dim = None,\n dim_head = 64,\n heads = 8,\n norm_context = False,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n context_dim = default(context_dim, dim)\n\n self.norm = LayerNorm(dim)\n self.norm_context = LayerNorm(context_dim) if norm_context else Identity()\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b h 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # cosine sim attention\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities\n\n sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\nclass LinearCrossAttention(CrossAttention):\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> (b h) n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> (b h) 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # masking\n\n max_neg_value = -torch.finfo(x.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b n -> b n 1')\n k = k.masked_fill(~mask, max_neg_value)\n v = v.masked_fill(~mask, 0.)\n\n # linear attention\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', q, context)\n out = rearrange(out, '(b h) n d -> b n (h d)', h = self.heads)\n return self.to_out(out)\n\nclass LinearAttention(nn.Module):\n def __init__(\n self,\n dim,\n dim_head = 32,\n heads = 8,\n dropout = 0.05,\n context_dim = None,\n **kwargs\n ):\n super().__init__()\n self.scale = dim_head ** -0.5\n self.heads = heads\n inner_dim = dim_head * heads\n self.norm = ChanLayerNorm(dim)\n\n self.nonlin = nn.SiLU()\n\n self.to_q = 
nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_k = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_v = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, inner_dim * 2, bias = False)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Conv2d(inner_dim, dim, 1, bias = False),\n ChanLayerNorm(dim)\n )\n\n def forward(self, fmap, context = None):\n h, x, y = self.heads, *fmap.shape[-2:]\n\n fmap = self.norm(fmap)\n q, k, v = map(lambda fn: fn(fmap), (self.to_q, self.to_k, self.to_v))\n q, k, v = rearrange_many((q, k, v), 'b (h c) x y -> (b h) (x y) c', h = h)\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n ck, cv = rearrange_many((ck, cv), 'b n (h d) -> (b h) n d', h = h)\n k = torch.cat((k, ck), dim = -2)\n v = torch.cat((v, cv), dim = -2)\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', q, context)\n out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)\n\n out = self.nonlin(out)\n return self.to_out(out)\n\nclass GlobalContext(nn.Module):\n \"\"\" basically a superior form of squeeze-excitation that is attention-esque \"\"\"\n\n def __init__(\n self,\n *,\n dim_in,\n dim_out\n ):\n super().__init__()\n self.to_k = nn.Conv2d(dim_in, 1, 1)\n hidden_dim = max(3, dim_out // 2)\n\n self.net = nn.Sequential(\n nn.Conv2d(dim_in, hidden_dim, 1),\n nn.SiLU(),\n nn.Conv2d(hidden_dim, dim_out, 1),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n context = self.to_k(x)\n x, context = rearrange_many((x, context), 'b n ... -> b n (...)')\n out = einsum('b i n, b c n -> b c i', context.softmax(dim = -1), x)\n out = rearrange(out, '... -> ... 
1')\n return self.net(out)\n\ndef FeedForward(dim, mult = 2):\n hidden_dim = int(dim * mult)\n return nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, hidden_dim, bias = False),\n nn.GELU(),\n LayerNorm(hidden_dim),\n nn.Linear(hidden_dim, dim, bias = False)\n )\n\ndef ChanFeedForward(dim, mult = 2): # in paper, it seems for self attention layers they did feedforwards with twice channel width\n hidden_dim = int(dim * mult)\n return nn.Sequential(\n ChanLayerNorm(dim),\n nn.Conv2d(dim, hidden_dim, 1, bias = False),\n nn.GELU(),\n ChanLayerNorm(hidden_dim),\n nn.Conv2d(hidden_dim, dim, 1, bias = False)\n )\n\nclass TransformerBlock(nn.Module):\n def __init__(\n self,\n dim,\n *,\n depth = 1,\n heads = 8,\n dim_head = 32,\n ff_mult = 2,\n context_dim = None\n ):\n super().__init__()\n self.layers = nn.ModuleList([])\n\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n Attention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, context = None):\n x = rearrange(x, 'b c h w -> b h w c')\n x, ps = pack([x], 'b * c')\n\n for attn, ff in self.layers:\n x = attn(x, context = context) + x\n x = ff(x) + x\n\n x, = unpack(x, ps, 'b * c')\n x = rearrange(x, 'b h w c -> b c h w')\n return x\n\nclass LinearAttentionTransformerBlock(nn.Module):\n def __init__(\n self,\n dim,\n *,\n depth = 1,\n heads = 8,\n dim_head = 32,\n ff_mult = 2,\n context_dim = None,\n **kwargs\n ):\n super().__init__()\n self.layers = nn.ModuleList([])\n\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n LinearAttention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),\n ChanFeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, context = None):\n for attn, ff in self.layers:\n x = attn(x, context = context) + x\n x = ff(x) + x\n return x\n\nclass CrossEmbedLayer(nn.Module):\n def __init__(\n self,\n dim_in,\n kernel_sizes,\n dim_out = None,\n stride = 2\n ):", "metadata": {"task_id": "lucidrains--imagen-pytorch/37", "ground_truth": " super().__init__()\n assert all([*map(lambda t: (t % 2) == (stride % 2), kernel_sizes)])\n dim_out = default(dim_out, dim_in)\n\n kernel_sizes = sorted(kernel_sizes)\n num_scales = len(kernel_sizes)\n\n # calculate the dimension at each scale\n dim_scales = [int(dim_out / (2 ** i)) for i in range(1, num_scales)]\n dim_scales = [*dim_scales, dim_out - sum(dim_scales)]\n\n self.convs = nn.ModuleList([])\n for kernel, dim_scale in zip(kernel_sizes, dim_scales):\n self.convs.append(nn.Conv2d(dim_in, dim_scale, kernel, stride = stride, padding = (kernel - stride) // 2))\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 673, "lineno": 1054, "function_name": "__init__"}, "groundtruth": " super().__init__()\n assert all([*map(lambda t: (t % 2) == (stride % 2), kernel_sizes)])\n dim_out = default(dim_out, dim_in)\n\n kernel_sizes = sorted(kernel_sizes)\n num_scales = len(kernel_sizes)\n\n # calculate the dimension at each scale\n dim_scales = [int(dim_out / (2 ** i)) for i in range(1, num_scales)]\n dim_scales = [*dim_scales, dim_out - sum(dim_scales)]\n\n self.convs = nn.ModuleList([])\n for kernel, dim_scale in zip(kernel_sizes, dim_scales):\n self.convs.append(nn.Conv2d(dim_in, dim_scale, kernel, stride = stride, padding = (kernel - stride) // 2))\n"} +{"prompt": "(cond_dim):\n attn_klass = CrossAttention if not linear_attn else LinearCrossAttention\n\n self.cross_attn = 
attn_klass(\n dim = dim_out,\n context_dim = cond_dim,\n **attn_kwargs\n )\n\n self.block1 = Block(dim, dim_out, groups = groups)\n self.block2 = Block(dim_out, dim_out, groups = groups)\n\n self.gca = GlobalContext(dim_in = dim_out, dim_out = dim_out) if use_gca else Always(1)\n\n self.res_conv = nn.Conv2d(dim, dim_out, 1) if dim != dim_out else Identity()\n\n\n def forward(self, x, time_emb = None, cond = None):\n\n scale_shift = None\n if exists(self.time_mlp) and exists(time_emb):\n time_emb = self.time_mlp(time_emb)\n time_emb = rearrange(time_emb, 'b c -> b c 1 1')\n scale_shift = time_emb.chunk(2, dim = 1)\n\n h = self.block1(x)\n\n if exists(self.cross_attn):\n assert exists(cond)\n h = rearrange(h, 'b c h w -> b h w c')\n h, ps = pack([h], 'b * c')\n h = self.cross_attn(h, context = cond) + h\n h, = unpack(h, ps, 'b * c')\n h = rearrange(h, 'b h w c -> b c h w')\n\n h = self.block2(h, scale_shift = scale_shift)\n\n h = h * self.gca(h)\n\n return h + self.res_conv(x)\n\nclass CrossAttention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n context_dim = None,\n dim_head = 64,\n heads = 8,\n norm_context = False,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n context_dim = default(context_dim, dim)\n\n self.norm = LayerNorm(dim)\n self.norm_context = LayerNorm(context_dim) if norm_context else Identity()\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b h 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # cosine sim attention\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities\n\n sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\nclass LinearCrossAttention(CrossAttention):\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> (b h) n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> (b h) 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # masking\n\n max_neg_value = 
-torch.finfo(x.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b n -> b n 1')\n k = k.masked_fill(~mask, max_neg_value)\n v = v.masked_fill(~mask, 0.)\n\n # linear attention\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', q, context)\n out = rearrange(out, '(b h) n d -> b n (h d)', h = self.heads)\n return self.to_out(out)\n\nclass LinearAttention(nn.Module):\n def __init__(\n self,\n dim,\n dim_head = 32,\n heads = 8,\n dropout = 0.05,\n context_dim = None,\n **kwargs\n ):\n super().__init__()\n self.scale = dim_head ** -0.5\n self.heads = heads\n inner_dim = dim_head * heads\n self.norm = ChanLayerNorm(dim)\n\n self.nonlin = nn.SiLU()\n\n self.to_q = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_k = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_v = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, inner_dim * 2, bias = False)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Conv2d(inner_dim, dim, 1, bias = False),\n ChanLayerNorm(dim)\n )\n\n def forward(self, fmap, context = None):\n h, x, y = self.heads, *fmap.shape[-2:]\n\n fmap = self.norm(fmap)\n q, k, v = map(lambda fn: fn(fmap), (self.to_q, self.to_k, self.to_v))\n q, k, v = rearrange_many((q, k, v), 'b (h c) x y -> (b h) (x y) c', h = h)\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n ck, cv = rearrange_many((ck, cv), 'b n (h d) -> (b h) n d', h = h)\n k = torch.cat((k, ck), dim = -2)\n v = torch.cat((v, cv), dim = -2)\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', q, context)\n out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)\n\n out = self.nonlin(out)\n return self.to_out(out)\n\nclass GlobalContext(nn.Module):\n \"\"\" basically a superior form of squeeze-excitation that is attention-esque \"\"\"\n\n def __init__(\n self,\n *,\n dim_in,\n dim_out\n ):\n super().__init__()\n self.to_k = nn.Conv2d(dim_in, 1, 1)\n hidden_dim = max(3, dim_out // 2)\n\n self.net = nn.Sequential(\n nn.Conv2d(dim_in, hidden_dim, 1),\n nn.SiLU(),\n nn.Conv2d(hidden_dim, dim_out, 1),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n context = self.to_k(x)\n x, context = rearrange_many((x, context), 'b n ... -> b n (...)')\n out = einsum('b i n, b c n -> b c i', context.softmax(dim = -1), x)\n out = rearrange(out, '... -> ... 
1')\n return self.net(out)\n\ndef FeedForward(dim, mult = 2):\n hidden_dim = int(dim * mult)\n return nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, hidden_dim, bias = False),\n nn.GELU(),\n LayerNorm(hidden_dim),\n nn.Linear(hidden_dim, dim, bias = False)\n )\n\ndef ChanFeedForward(dim, mult = 2): # in paper, it seems for self attention layers they did feedforwards with twice channel width\n hidden_dim = int(dim * mult)\n return nn.Sequential(\n ChanLayerNorm(dim),\n nn.Conv2d(dim, hidden_dim, 1, bias = False),\n nn.GELU(),\n ChanLayerNorm(hidden_dim),\n nn.Conv2d(hidden_dim, dim, 1, bias = False)\n )\n\nclass TransformerBlock(nn.Module):\n def __init__(\n self,\n dim,\n *,\n depth = 1,\n heads = 8,\n dim_head = 32,\n ff_mult = 2,\n context_dim = None\n ):\n super().__init__()\n self.layers = nn.ModuleList([])\n\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n Attention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, context = None):\n x = rearrange(x, 'b c h w -> b h w c')\n x, ps = pack([x], 'b * c')\n\n for attn, ff in self.layers:\n x = attn(x, context = context) + x\n x = ff(x) + x\n\n x, = unpack(x, ps, 'b * c')\n x = rearrange(x, 'b h w c -> b c h w')\n return x\n\nclass LinearAttentionTransformerBlock(nn.Module):\n def __init__(\n self,\n dim,\n *,\n depth = 1,\n heads = 8,\n dim_head = 32,\n ff_mult = 2,\n context_dim = None,\n **kwargs\n ):\n super().__init__()\n self.layers = nn.ModuleList([])\n\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n LinearAttention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),\n ChanFeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, context = None):\n for attn, ff in self.layers:\n x = attn(x, context = context) + x\n x = ff(x) + x\n return x\n\nclass CrossEmbedLayer(nn.Module):\n def __init__(\n self,\n dim_in,\n kernel_sizes,\n dim_out = None,\n stride = 2\n ):\n super().__init__()\n assert all([*map(lambda t: (t % 2) == (stride % 2), kernel_sizes)])\n dim_out = default(dim_out, dim_in)\n\n kernel_sizes = sorted(kernel_sizes)\n num_scales = len(kernel_sizes)\n\n # calculate the dimension at each scale\n dim_scales = [int(dim_out / (2 ** i)) for i in range(1, num_scales)]\n dim_scales = [*dim_scales, dim_out - sum(dim_scales)]\n\n self.convs = nn.ModuleList([])\n for kernel, dim_scale in zip(kernel_sizes, dim_scales):\n self.convs.append(nn.Conv2d(dim_in, dim_scale, kernel, stride = stride, padding = (kernel - stride) // 2))\n\n def forward(self, x):\n fmaps = tuple(map(lambda conv: conv(x), self.convs))\n return torch.cat(fmaps, dim = 1)\n\nclass UpsampleCombiner(nn.Module):\n def __init__(\n self,\n dim,\n *,\n enabled = False,\n dim_ins = tuple(),\n dim_outs = tuple()\n ):", "metadata": {"task_id": "lucidrains--imagen-pytorch/38", "ground_truth": " super().__init__()\n dim_outs = cast_tuple(dim_outs, len(dim_ins))\n assert len(dim_ins) == len(dim_outs)\n\n self.enabled = enabled\n\n if not self.enabled:\n self.dim_out = dim\n return\n\n self.fmap_convs = nn.ModuleList([Block(dim_in, dim_out) for dim_in, dim_out in zip(dim_ins, dim_outs)])\n self.dim_out = dim + (sum(dim_outs) if len(dim_outs) > 0 else 0)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 713, "lineno": 1082, "function_name": "__init__"}, "groundtruth": " super().__init__()\n dim_outs = cast_tuple(dim_outs, len(dim_ins))\n assert len(dim_ins) 
== len(dim_outs)\n\n self.enabled = enabled\n\n if not self.enabled:\n self.dim_out = dim\n return\n\n self.fmap_convs = nn.ModuleList([Block(dim_in, dim_out) for dim_in, dim_out in zip(dim_ins, dim_outs)])\n self.dim_out = dim + (sum(dim_outs) if len(dim_outs) > 0 else 0)\n"} +{"prompt": "d(dim, dim_out, 1) if dim != dim_out else Identity()\n\n\n def forward(self, x, time_emb = None, cond = None):\n\n scale_shift = None\n if exists(self.time_mlp) and exists(time_emb):\n time_emb = self.time_mlp(time_emb)\n time_emb = rearrange(time_emb, 'b c -> b c 1 1')\n scale_shift = time_emb.chunk(2, dim = 1)\n\n h = self.block1(x)\n\n if exists(self.cross_attn):\n assert exists(cond)\n h = rearrange(h, 'b c h w -> b h w c')\n h, ps = pack([h], 'b * c')\n h = self.cross_attn(h, context = cond) + h\n h, = unpack(h, ps, 'b * c')\n h = rearrange(h, 'b h w c -> b c h w')\n\n h = self.block2(h, scale_shift = scale_shift)\n\n h = h * self.gca(h)\n\n return h + self.res_conv(x)\n\nclass CrossAttention(nn.Module):\n def __init__(\n self,\n dim,\n *,\n context_dim = None,\n dim_head = 64,\n heads = 8,\n norm_context = False,\n scale = 8\n ):\n super().__init__()\n self.scale = scale\n\n self.heads = heads\n inner_dim = dim_head * heads\n\n context_dim = default(context_dim, dim)\n\n self.norm = LayerNorm(dim)\n self.norm_context = LayerNorm(context_dim) if norm_context else Identity()\n\n self.null_kv = nn.Parameter(torch.randn(2, dim_head))\n self.to_q = nn.Linear(dim, inner_dim, bias = False)\n self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)\n\n self.q_scale = nn.Parameter(torch.ones(dim_head))\n self.k_scale = nn.Parameter(torch.ones(dim_head))\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, dim, bias = False),\n LayerNorm(dim)\n )\n\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> b h n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> b h 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # cosine sim attention\n\n q, k = map(l2norm, (q, k))\n q = q * self.q_scale\n k = k * self.k_scale\n\n # similarities\n\n sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale\n\n # masking\n\n max_neg_value = -torch.finfo(sim.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b j -> b 1 1 j')\n sim = sim.masked_fill(~mask, max_neg_value)\n\n attn = sim.softmax(dim = -1, dtype = torch.float32)\n attn = attn.to(sim.dtype)\n\n out = einsum('b h i j, b h j d -> b h i d', attn, v)\n out = rearrange(out, 'b h n d -> b n (h d)')\n return self.to_out(out)\n\nclass LinearCrossAttention(CrossAttention):\n def forward(self, x, context, mask = None):\n b, n, device = *x.shape[:2], x.device\n\n x = self.norm(x)\n context = self.norm_context(context)\n\n q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))\n\n q, k, v = rearrange_many((q, k, v), 'b n (h d) -> (b h) n d', h = self.heads)\n\n # add null key / value for classifier free guidance in prior net\n\n nk, nv = repeat_many(self.null_kv.unbind(dim = -2), 'd -> (b h) 1 d', h = self.heads, b = b)\n\n k = torch.cat((nk, k), dim = -2)\n v = torch.cat((nv, v), dim = -2)\n\n # masking\n\n max_neg_value = 
-torch.finfo(x.dtype).max\n\n if exists(mask):\n mask = F.pad(mask, (1, 0), value = True)\n mask = rearrange(mask, 'b n -> b n 1')\n k = k.masked_fill(~mask, max_neg_value)\n v = v.masked_fill(~mask, 0.)\n\n # linear attention\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', q, context)\n out = rearrange(out, '(b h) n d -> b n (h d)', h = self.heads)\n return self.to_out(out)\n\nclass LinearAttention(nn.Module):\n def __init__(\n self,\n dim,\n dim_head = 32,\n heads = 8,\n dropout = 0.05,\n context_dim = None,\n **kwargs\n ):\n super().__init__()\n self.scale = dim_head ** -0.5\n self.heads = heads\n inner_dim = dim_head * heads\n self.norm = ChanLayerNorm(dim)\n\n self.nonlin = nn.SiLU()\n\n self.to_q = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_k = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_v = nn.Sequential(\n nn.Dropout(dropout),\n nn.Conv2d(dim, inner_dim, 1, bias = False),\n nn.Conv2d(inner_dim, inner_dim, 3, bias = False, padding = 1, groups = inner_dim)\n )\n\n self.to_context = nn.Sequential(nn.LayerNorm(context_dim), nn.Linear(context_dim, inner_dim * 2, bias = False)) if exists(context_dim) else None\n\n self.to_out = nn.Sequential(\n nn.Conv2d(inner_dim, dim, 1, bias = False),\n ChanLayerNorm(dim)\n )\n\n def forward(self, fmap, context = None):\n h, x, y = self.heads, *fmap.shape[-2:]\n\n fmap = self.norm(fmap)\n q, k, v = map(lambda fn: fn(fmap), (self.to_q, self.to_k, self.to_v))\n q, k, v = rearrange_many((q, k, v), 'b (h c) x y -> (b h) (x y) c', h = h)\n\n if exists(context):\n assert exists(self.to_context)\n ck, cv = self.to_context(context).chunk(2, dim = -1)\n ck, cv = rearrange_many((ck, cv), 'b n (h d) -> (b h) n d', h = h)\n k = torch.cat((k, ck), dim = -2)\n v = torch.cat((v, cv), dim = -2)\n\n q = q.softmax(dim = -1)\n k = k.softmax(dim = -2)\n\n q = q * self.scale\n\n context = einsum('b n d, b n e -> b d e', k, v)\n out = einsum('b n d, b d e -> b n e', q, context)\n out = rearrange(out, '(b h) (x y) d -> b (h d) x y', h = h, x = x, y = y)\n\n out = self.nonlin(out)\n return self.to_out(out)\n\nclass GlobalContext(nn.Module):\n \"\"\" basically a superior form of squeeze-excitation that is attention-esque \"\"\"\n\n def __init__(\n self,\n *,\n dim_in,\n dim_out\n ):\n super().__init__()\n self.to_k = nn.Conv2d(dim_in, 1, 1)\n hidden_dim = max(3, dim_out // 2)\n\n self.net = nn.Sequential(\n nn.Conv2d(dim_in, hidden_dim, 1),\n nn.SiLU(),\n nn.Conv2d(hidden_dim, dim_out, 1),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n context = self.to_k(x)\n x, context = rearrange_many((x, context), 'b n ... -> b n (...)')\n out = einsum('b i n, b c n -> b c i', context.softmax(dim = -1), x)\n out = rearrange(out, '... -> ... 
1')\n return self.net(out)\n\ndef FeedForward(dim, mult = 2):\n hidden_dim = int(dim * mult)\n return nn.Sequential(\n LayerNorm(dim),\n nn.Linear(dim, hidden_dim, bias = False),\n nn.GELU(),\n LayerNorm(hidden_dim),\n nn.Linear(hidden_dim, dim, bias = False)\n )\n\ndef ChanFeedForward(dim, mult = 2): # in paper, it seems for self attention layers they did feedforwards with twice channel width\n hidden_dim = int(dim * mult)\n return nn.Sequential(\n ChanLayerNorm(dim),\n nn.Conv2d(dim, hidden_dim, 1, bias = False),\n nn.GELU(),\n ChanLayerNorm(hidden_dim),\n nn.Conv2d(hidden_dim, dim, 1, bias = False)\n )\n\nclass TransformerBlock(nn.Module):\n def __init__(\n self,\n dim,\n *,\n depth = 1,\n heads = 8,\n dim_head = 32,\n ff_mult = 2,\n context_dim = None\n ):\n super().__init__()\n self.layers = nn.ModuleList([])\n\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n Attention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),\n FeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, context = None):\n x = rearrange(x, 'b c h w -> b h w c')\n x, ps = pack([x], 'b * c')\n\n for attn, ff in self.layers:\n x = attn(x, context = context) + x\n x = ff(x) + x\n\n x, = unpack(x, ps, 'b * c')\n x = rearrange(x, 'b h w c -> b c h w')\n return x\n\nclass LinearAttentionTransformerBlock(nn.Module):\n def __init__(\n self,\n dim,\n *,\n depth = 1,\n heads = 8,\n dim_head = 32,\n ff_mult = 2,\n context_dim = None,\n **kwargs\n ):\n super().__init__()\n self.layers = nn.ModuleList([])\n\n for _ in range(depth):\n self.layers.append(nn.ModuleList([\n LinearAttention(dim = dim, heads = heads, dim_head = dim_head, context_dim = context_dim),\n ChanFeedForward(dim = dim, mult = ff_mult)\n ]))\n\n def forward(self, x, context = None):\n for attn, ff in self.layers:\n x = attn(x, context = context) + x\n x = ff(x) + x\n return x\n\nclass CrossEmbedLayer(nn.Module):\n def __init__(\n self,\n dim_in,\n kernel_sizes,\n dim_out = None,\n stride = 2\n ):\n super().__init__()\n assert all([*map(lambda t: (t % 2) == (stride % 2), kernel_sizes)])\n dim_out = default(dim_out, dim_in)\n\n kernel_sizes = sorted(kernel_sizes)\n num_scales = len(kernel_sizes)\n\n # calculate the dimension at each scale\n dim_scales = [int(dim_out / (2 ** i)) for i in range(1, num_scales)]\n dim_scales = [*dim_scales, dim_out - sum(dim_scales)]\n\n self.convs = nn.ModuleList([])\n for kernel, dim_scale in zip(kernel_sizes, dim_scales):\n self.convs.append(nn.Conv2d(dim_in, dim_scale, kernel, stride = stride, padding = (kernel - stride) // 2))\n\n def forward(self, x):\n fmaps = tuple(map(lambda conv: conv(x), self.convs))\n return torch.cat(fmaps, dim = 1)\n\nclass UpsampleCombiner(nn.Module):\n def __init__(\n self,\n dim,\n *,\n enabled = False,\n dim_ins = tuple(),\n dim_outs = tuple()\n ):\n super().__init__()\n dim_outs = cast_tuple(dim_outs, len(dim_ins))\n assert len(dim_ins) == len(dim_outs)\n\n self.enabled = enabled\n\n if not self.enabled:\n self.dim_out = dim\n return\n\n self.fmap_convs = nn.ModuleList([Block(dim_in, dim_out) for dim_in, dim_out in zip(dim_ins, dim_outs)])\n self.dim_out = dim + (sum(dim_outs) if len(dim_outs) > 0 else 0)\n\n def forward(self, x, fmaps = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/39", "ground_truth": " target_size = x.shape[-1]\n\n fmaps = default(fmaps, tuple())\n\n if not self.enabled or len(fmaps) == 0 or len(self.fmap_convs) == 0:\n return x\n\n fmaps = [resize_image_to(fmap, target_size) for fmap in fmaps]\n outs = 
[conv(fmap) for fmap, conv in zip(fmaps, self.fmap_convs)]\n return torch.cat((x, *outs), dim = 1)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 727, "lineno": 1096, "function_name": "forward"}, "groundtruth": " target_size = x.shape[-1]\n\n fmaps = default(fmaps, tuple())\n\n if not self.enabled or len(fmaps) == 0 or len(self.fmap_convs) == 0:\n return x\n\n fmaps = [resize_image_to(fmap, target_size) for fmap in fmaps]\n outs = [conv(fmap) for fmap, conv in zip(fmaps, self.fmap_convs)]\n return torch.cat((x, *outs), dim = 1)\n"} +{"prompt": " # kernel size of initial conv, if not using cross embed\n init_cross_embed = True,\n init_cross_embed_kernel_sizes = (3, 7, 15),\n cross_embed_downsample = False,\n cross_embed_downsample_kernel_sizes = (2, 4),\n attn_pool_text = True,\n attn_pool_num_latents = 32,\n dropout = 0.,\n memory_efficient = False,\n init_conv_to_final_conv_residual = False,\n use_global_context_attn = True,\n scale_skip_connection = True,\n final_resnet_block = True,\n final_conv_kernel_size = 3,\n self_cond = False,\n resize_mode = 'nearest',\n combine_upsample_fmaps = False, # combine feature maps from all upsample blocks, used in unet squared successfully\n pixel_shuffle_upsample = True, # may address checkboard artifacts\n ):\n super().__init__()\n\n # guide researchers\n\n assert attn_heads > 1, 'you need to have more than 1 attention head, ideally at least 4 or 8'\n\n if dim < 128:\n print_once('The base dimension of your u-net should ideally be no smaller than 128, as recommended by a professional DDPM trainer https://nonint.com/2022/05/04/friends-dont-let-friends-train-small-diffusion-models/')\n\n # save locals to take care of some hyperparameters for cascading DDPM\n\n self._locals = locals()\n self._locals.pop('self', None)\n self._locals.pop('__class__', None)\n\n # determine dimensions\n\n self.channels = channels\n self.channels_out = default(channels_out, channels)\n\n # (1) in cascading diffusion, one concats the low resolution image, blurred, for conditioning the higher resolution synthesis\n # (2) in self conditioning, one appends the predict x0 (x_start)\n init_channels = channels * (1 + int(lowres_cond) + int(self_cond))\n init_dim = default(init_dim, dim)\n\n self.self_cond = self_cond\n\n # optional image conditioning\n\n self.has_cond_image = cond_images_channels > 0\n self.cond_images_channels = cond_images_channels\n\n init_channels += cond_images_channels\n\n # initial convolution\n\n self.init_conv = CrossEmbedLayer(init_channels, dim_out = init_dim, kernel_sizes = init_cross_embed_kernel_sizes, stride = 1) if init_cross_embed else nn.Conv2d(init_channels, init_dim, init_conv_kernel_size, padding = init_conv_kernel_size // 2)\n\n dims = [init_dim, *map(lambda m: dim * m, dim_mults)]\n in_out = list(zip(dims[:-1], dims[1:]))\n\n # time conditioning\n\n cond_dim = default(cond_dim, dim)\n time_cond_dim = dim * 4 * (2 if lowres_cond else 1)\n\n # embedding time for log(snr) noise from continuous version\n\n sinu_pos_emb = LearnedSinusoidalPosEmb(learned_sinu_pos_emb_dim)\n sinu_pos_emb_input_dim = learned_sinu_pos_emb_dim + 1\n\n self.to_time_hiddens = nn.Sequential(\n sinu_pos_emb,\n nn.Linear(sinu_pos_emb_input_dim, time_cond_dim),\n nn.SiLU()\n )\n\n self.to_time_cond = nn.Sequential(\n nn.Linear(time_cond_dim, time_cond_dim)\n )\n\n # project to time tokens as well as time hiddens\n\n self.to_time_tokens = nn.Sequential(\n nn.Linear(time_cond_dim, cond_dim * num_time_tokens),\n 
Rearrange('b (r d) -> b r d', r = num_time_tokens)\n )\n\n # low res aug noise conditioning\n\n self.lowres_cond = lowres_cond\n\n if lowres_cond:\n self.to_lowres_time_hiddens = nn.Sequential(\n LearnedSinusoidalPosEmb(learned_sinu_pos_emb_dim),\n nn.Linear(learned_sinu_pos_emb_dim + 1, time_cond_dim),\n nn.SiLU()\n )\n\n self.to_lowres_time_cond = nn.Sequential(\n nn.Linear(time_cond_dim, time_cond_dim)\n )\n\n self.to_lowres_time_tokens = nn.Sequential(\n nn.Linear(time_cond_dim, cond_dim * num_time_tokens),\n Rearrange('b (r d) -> b r d', r = num_time_tokens)\n )\n\n # normalizations\n\n self.norm_cond = nn.LayerNorm(cond_dim)\n\n # text encoding conditioning (optional)\n\n self.text_to_cond = None\n\n if cond_on_text:\n assert exists(text_embed_dim), 'text_embed_dim must be given to the unet if cond_on_text is True'\n self.text_to_cond = nn.Linear(text_embed_dim, cond_dim)\n\n # finer control over whether to condition on text encodings\n\n self.cond_on_text = cond_on_text\n\n # attention pooling\n\n self.attn_pool = PerceiverResampler(dim = cond_dim, depth = 2, dim_head = attn_dim_head, heads = attn_heads, num_latents = attn_pool_num_latents) if attn_pool_text else None\n\n # for classifier free guidance\n\n self.max_text_len = max_text_len\n\n self.null_text_embed = nn.Parameter(torch.randn(1, max_text_len, cond_dim))\n self.null_text_hidden = nn.Parameter(torch.randn(1, time_cond_dim))\n\n # for non-attention based text conditioning at all points in the network where time is also conditioned\n\n self.to_text_non_attn_cond = None\n\n if cond_on_text:\n self.to_text_non_attn_cond = nn.Sequential(\n nn.LayerNorm(cond_dim),\n nn.Linear(cond_dim, time_cond_dim),\n nn.SiLU(),\n nn.Linear(time_cond_dim, time_cond_dim)\n )\n\n # attention related params\n\n attn_kwargs = dict(heads = attn_heads, dim_head = attn_dim_head)\n\n num_layers = len(in_out)\n\n # resnet block klass\n\n num_resnet_blocks = cast_tuple(num_resnet_blocks, num_layers)\n resnet_groups = cast_tuple(resnet_groups, num_layers)\n\n resnet_klass = partial(ResnetBlock, **attn_kwargs)\n\n layer_attns = cast_tuple(layer_attns, num_layers)\n layer_attns_depth = cast_tuple(layer_attns_depth, num_layers)\n layer_cross_attns = cast_tuple(layer_cross_attns, num_layers)\n\n use_linear_attn = cast_tuple(use_linear_attn, num_layers)\n use_linear_cross_attn = cast_tuple(use_linear_cross_attn, num_layers)\n\n assert all([layers == num_layers for layers in list(map(len, (resnet_groups, layer_attns, layer_cross_attns)))])\n\n # downsample klass\n\n downsample_klass = Downsample\n\n if cross_embed_downsample:\n downsample_klass = partial(CrossEmbedLayer, kernel_sizes = cross_embed_downsample_kernel_sizes)\n\n # initial resnet block (for memory efficient unet)\n\n self.init_resnet_block = resnet_klass(init_dim, init_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[0], use_gca = use_global_context_attn) if memory_efficient else None\n\n # scale for resnet skip connections\n\n self.skip_connect_scale = 1. 
if not scale_skip_connection else (2 ** -0.5)\n\n # layers\n\n self.downs = nn.ModuleList([])\n self.ups = nn.ModuleList([])\n num_resolutions = len(in_out)\n\n layer_params = [num_resnet_blocks, resnet_groups, layer_attns, layer_attns_depth, layer_cross_attns, use_linear_attn, use_linear_cross_attn]\n reversed_layer_params = list(map(reversed, layer_params))\n\n # downsampling layers\n\n skip_connect_dims = [] # keep track of skip connection dimensions\n\n for ind, ((dim_in, dim_out), layer_num_resnet_blocks, groups, layer_attn, layer_attn_depth, layer_cross_attn, layer_use_linear_attn, layer_use_linear_cross_attn) in enumerate(zip(in_out, *layer_params)):\n is_last = ind >= (num_resolutions - 1)\n\n layer_cond_dim = cond_dim if layer_cross_attn or layer_use_linear_cross_attn else None\n\n if layer_attn:\n transformer_block_klass = TransformerBlock\n elif layer_use_linear_attn:\n transformer_block_klass = LinearAttentionTransformerBlock\n else:\n transformer_block_klass = Identity\n\n current_dim = dim_in\n\n # whether to pre-downsample, from memory efficient unet\n\n pre_downsample = None\n\n if memory_efficient:\n pre_downsample = downsample_klass(dim_in, dim_out)\n current_dim = dim_out\n\n skip_connect_dims.append(current_dim)\n\n # whether to do post-downsample, for non-memory efficient unet\n\n post_downsample = None\n if not memory_efficient:\n post_downsample = downsample_klass(current_dim, dim_out) if not is_last else Parallel(nn.Conv2d(dim_in, dim_out, 3, padding = 1), nn.Conv2d(dim_in, dim_out, 1))\n\n self.downs.append(nn.ModuleList([\n pre_downsample,\n resnet_klass(current_dim, current_dim, cond_dim = layer_cond_dim, linear_attn = layer_use_linear_cross_attn, time_cond_dim = time_cond_dim, groups = groups),\n nn.ModuleList([ResnetBlock(current_dim, current_dim, time_cond_dim = time_cond_dim, groups = groups, use_gca = use_global_context_attn) for _ in range(layer_num_resnet_blocks)]),\n transformer_block_klass(dim = current_dim, depth = layer_attn_depth, ff_mult = ff_mult, context_dim = cond_dim, **attn_kwargs),\n post_downsample\n ]))\n\n # middle layers\n\n mid_dim = dims[-1]\n\n self.mid_block1 = ResnetBlock(mid_dim, mid_dim, cond_dim = cond_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[-1])\n self.mid_attn = TransformerBlock(mid_dim, depth = layer_mid_attns_depth, **attn_kwargs) if attend_at_middle else None\n self.mid_block2 = ResnetBlock(mid_dim, mid_dim, cond_dim = cond_dim, time_cond_dim = time_cond_dim, groups = resnet_groups[-1])\n\n # upsample klass\n\n upsample_klass = Upsample if not pixel_shuffle_upsample else PixelShuffleUpsample\n\n # upsampling layers\n\n upsample_fmap_dims = []\n\n for ind, ((dim_in, dim_out), layer_num_resnet_blocks, groups, layer_attn, layer_attn_depth, layer_cross_attn, layer_use_linear_attn, layer_use_linear_cross_attn) in enumerate(zip(reversed(in_out), *reversed_layer_params)):\n is_last = ind == (len(in_out) - 1)\n\n layer_cond_dim = cond_dim if layer_cross_attn or layer_use_linear_cross_attn else None\n\n if layer_attn:\n transformer_block_klass = TransformerBlock\n elif layer_use_linear_attn:\n transformer_block_klass = LinearAttentionTransformerBlock\n else:\n transformer_block_klass = Identity\n\n skip_connect_dim = skip_connect_dims.pop()\n\n upsample_fmap_dims.append(dim_out)\n\n self.ups.append(nn.ModuleList([\n resnet_klass(dim_out + skip_connect_dim, dim_out, cond_dim = layer_cond_dim, linear_attn = layer_use_linear_cross_attn, time_cond_dim = time_cond_dim, groups = groups),\n nn.ModuleList([ResnetBlock(dim_out 
+ skip_connect_dim, dim_out, time_cond_dim = time_cond_dim, groups = groups, use_gca = use_global_context_attn) for _ in range(layer_num_resnet_blocks)]),\n transformer_block_klass(dim = dim_out, depth = layer_attn_depth, ff_mult = ff_mult, context_dim = cond_dim, **attn_kwargs),\n upsample_klass(dim_out, dim_in) if not is_last or memory_efficient else Identity()\n ]))\n\n # whether to combine feature maps from all upsample blocks before final resnet block out\n\n self.upsample_combiner = UpsampleCombiner(\n dim = dim,\n enabled = combine_upsample_fmaps,\n dim_ins = upsample_fmap_dims,\n dim_outs = dim\n )\n\n # whether to do a final residual from initial conv to the final resnet block out\n\n self.init_conv_to_final_conv_residual = init_conv_to_final_conv_residual\n final_conv_dim = self.upsample_combiner.dim_out + (dim if init_conv_to_final_conv_residual else 0)\n\n # final optional resnet block and convolution out\n\n self.final_res_block = ResnetBlock(final_conv_dim, dim, time_cond_dim = time_cond_dim, groups = resnet_groups[0], use_gca = True) if final_resnet_block else None\n\n final_conv_dim_in = dim if final_resnet_block else final_conv_dim\n final_conv_dim_in += (channels if lowres_cond else 0)\n\n self.final_conv = nn.Conv2d(final_conv_dim_in, self.channels_out, final_conv_kernel_size, padding = final_conv_kernel_size // 2)\n\n zero_init_(self.final_conv)\n\n # resize mode\n\n self.resize_mode = resize_mode\n\n # if the current settings for the unet are not correct\n # for cascading DDPM, then reinit the unet with the right settings\n def cast_model_parameters(\n self,\n *,\n lowres_cond,\n text_embed_dim,\n channels,\n channels_out,\n cond_on_text\n ):", "metadata": {"task_id": "lucidrains--imagen-pytorch/40", "ground_truth": " if lowres_cond == self.lowres_cond and \\\n channels == self.channels and \\\n cond_on_text == self.cond_on_text and \\\n text_embed_dim == self._locals['text_embed_dim'] and \\\n channels_out == self.channels_out:\n return self\n\n updated_kwargs = dict(\n lowres_cond = lowres_cond,\n text_embed_dim = text_embed_dim,\n channels = channels,\n channels_out = channels_out,\n cond_on_text = cond_on_text\n )\n\n return self.__class__(**{**self._locals, **updated_kwargs})\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 1139, "lineno": 1452, "function_name": "cast_model_parameters"}, "groundtruth": " if lowres_cond == self.lowres_cond and \\\n channels == self.channels and \\\n cond_on_text == self.cond_on_text and \\\n text_embed_dim == self._locals['text_embed_dim'] and \\\n channels_out == self.channels_out:\n return self\n\n updated_kwargs = dict(\n lowres_cond = lowres_cond,\n text_embed_dim = text_embed_dim,\n channels = channels,\n channels_out = channels_out,\n cond_on_text = cond_on_text\n )\n\n return self.__class__(**{**self._locals, **updated_kwargs})\n"} +{"prompt": "\n\n if exists(self.attn_pool):\n text_tokens = self.attn_pool(text_tokens)\n\n # extra non-attention conditioning by projecting and then summing text embeddings to time\n # termed as text hiddens\n\n mean_pooled_text_tokens = text_tokens.mean(dim = -2)\n\n text_hiddens = self.to_text_non_attn_cond(mean_pooled_text_tokens)\n\n null_text_hidden = self.null_text_hidden.to(t.dtype)\n\n text_hiddens = torch.where(\n text_keep_mask_hidden,\n text_hiddens,\n null_text_hidden\n )\n\n t = t + text_hiddens\n\n # main conditioning tokens (c)\n\n c = time_tokens if not exists(text_tokens) else torch.cat((time_tokens, text_tokens), 
dim = -2)\n\n # normalize conditioning tokens\n\n c = self.norm_cond(c)\n\n # initial resnet block (for memory efficient unet)\n\n if exists(self.init_resnet_block):\n x = self.init_resnet_block(x, t)\n\n # go through the layers of the unet, down and up\n\n hiddens = []\n\n for pre_downsample, init_block, resnet_blocks, attn_block, post_downsample in self.downs:\n if exists(pre_downsample):\n x = pre_downsample(x)\n\n x = init_block(x, t, c)\n\n for resnet_block in resnet_blocks:\n x = resnet_block(x, t)\n hiddens.append(x)\n\n x = attn_block(x, c)\n hiddens.append(x)\n\n if exists(post_downsample):\n x = post_downsample(x)\n\n x = self.mid_block1(x, t, c)\n\n if exists(self.mid_attn):\n x = self.mid_attn(x)\n\n x = self.mid_block2(x, t, c)\n\n add_skip_connection = lambda x: torch.cat((x, hiddens.pop() * self.skip_connect_scale), dim = 1)\n\n up_hiddens = []\n\n for init_block, resnet_blocks, attn_block, upsample in self.ups:\n x = add_skip_connection(x)\n x = init_block(x, t, c)\n\n for resnet_block in resnet_blocks:\n x = add_skip_connection(x)\n x = resnet_block(x, t)\n\n x = attn_block(x, c)\n up_hiddens.append(x.contiguous())\n x = upsample(x)\n\n # whether to combine all feature maps from upsample blocks\n\n x = self.upsample_combiner(x, up_hiddens)\n\n # final top-most residual if needed\n\n if self.init_conv_to_final_conv_residual:\n x = torch.cat((x, init_conv_residual), dim = 1)\n\n if exists(self.final_res_block):\n x = self.final_res_block(x, t)\n\n if exists(lowres_cond_img):\n x = torch.cat((x, lowres_cond_img), dim = 1)\n\n return self.final_conv(x)\n\n# null unet\n\nclass NullUnet(nn.Module):\n def __init__(self, *args, **kwargs):\n super().__init__()\n self.lowres_cond = False\n self.dummy_parameter = nn.Parameter(torch.tensor([0.]))\n\n def cast_model_parameters(self, *args, **kwargs):\n return self\n\n def forward(self, x, *args, **kwargs):\n return x\n\n# predefined unets, with configs lining up with hyperparameters in appendix of paper\n\nclass BaseUnet64(Unet):\n def __init__(self, *args, **kwargs):\n default_kwargs = dict(\n dim = 512,\n dim_mults = (1, 2, 3, 4),\n num_resnet_blocks = 3,\n layer_attns = (False, True, True, True),\n layer_cross_attns = (False, True, True, True),\n attn_heads = 8,\n ff_mult = 2.,\n memory_efficient = False\n )\n super().__init__(*args, **{**default_kwargs, **kwargs})\n\nclass SRUnet256(Unet):\n def __init__(self, *args, **kwargs):\n default_kwargs = dict(\n dim = 128,\n dim_mults = (1, 2, 4, 8),\n num_resnet_blocks = (2, 4, 8, 8),\n layer_attns = (False, False, False, True),\n layer_cross_attns = (False, False, False, True),\n attn_heads = 8,\n ff_mult = 2.,\n memory_efficient = True\n )\n super().__init__(*args, **{**default_kwargs, **kwargs})\n\nclass SRUnet1024(Unet):\n def __init__(self, *args, **kwargs):\n default_kwargs = dict(\n dim = 128,\n dim_mults = (1, 2, 4, 8),\n num_resnet_blocks = (2, 4, 8, 8),\n layer_attns = False,\n layer_cross_attns = (False, False, False, True),\n attn_heads = 8,\n ff_mult = 2.,\n memory_efficient = True\n )\n super().__init__(*args, **{**default_kwargs, **kwargs})\n\n# main imagen ddpm class, which is a cascading DDPM from Ho et al.\n\nclass Imagen(nn.Module):\n def __init__(\n self,\n unets,\n *,\n image_sizes, # for cascading ddpm, image size at each stage\n text_encoder_name = DEFAULT_T5_NAME,\n text_embed_dim = None,\n channels = 3,\n timesteps = 1000,\n cond_drop_prob = 0.1,\n loss_type = 'l2',\n noise_schedules = 'cosine',\n pred_objectives = 'noise',\n random_crop_sizes = None,\n 
lowres_noise_schedule = 'linear',\n lowres_sample_noise_level = 0.2, # in the paper, they present a new trick where they noise the lowres conditioning image, and at sample time, fix it to a certain level (0.1 or 0.3) - the unets are also made to be conditioned on this noise level\n per_sample_random_aug_noise_level = False, # unclear when conditioning on augmentation noise level, whether each batch element receives a random aug noise value - turning off due to @marunine's find\n condition_on_text = True,\n auto_normalize_img = True, # whether to take care of normalizing the image from [0, 1] to [-1, 1] and back automatically - you can turn this off if you want to pass in the [-1, 1] ranged image yourself from the dataloader\n p2_loss_weight_gamma = 0.5, # p2 loss weight, from https://arxiv.org/abs/2204.00227 - 0 is equivalent to weight of 1 across time\n p2_loss_weight_k = 1,\n dynamic_thresholding = True,\n dynamic_thresholding_percentile = 0.95, # unsure what this was based on perusal of paper\n only_train_unet_number = None,\n temporal_downsample_factor = 1,\n resize_cond_video_frames = True,\n resize_mode = 'nearest'\n ):\n super().__init__()\n\n # loss\n\n if loss_type == 'l1':\n loss_fn = F.l1_loss\n elif loss_type == 'l2':\n loss_fn = F.mse_loss\n elif loss_type == 'huber':\n loss_fn = F.smooth_l1_loss\n else:\n raise NotImplementedError()\n\n self.loss_type = loss_type\n self.loss_fn = loss_fn\n\n # conditioning hparams\n\n self.condition_on_text = condition_on_text\n self.unconditional = not condition_on_text\n\n # channels\n\n self.channels = channels\n\n # automatically take care of ensuring that first unet is unconditional\n # while the rest of the unets are conditioned on the low resolution image produced by previous unet\n\n unets = cast_tuple(unets)\n num_unets = len(unets)\n\n # determine noise schedules per unet\n\n timesteps = cast_tuple(timesteps, num_unets)\n\n # make sure noise schedule defaults to 'cosine', 'cosine', and then 'linear' for rest of super-resoluting unets\n\n noise_schedules = cast_tuple(noise_schedules)\n noise_schedules = pad_tuple_to_length(noise_schedules, 2, 'cosine')\n noise_schedules = pad_tuple_to_length(noise_schedules, num_unets, 'linear')\n\n # construct noise schedulers\n\n noise_scheduler_klass = GaussianDiffusionContinuousTimes\n self.noise_schedulers = nn.ModuleList([])\n\n for timestep, noise_schedule in zip(timesteps, noise_schedules):\n noise_scheduler = noise_scheduler_klass(noise_schedule = noise_schedule, timesteps = timestep)\n self.noise_schedulers.append(noise_scheduler)\n\n # randomly cropping for upsampler training\n\n self.random_crop_sizes = cast_tuple(random_crop_sizes, num_unets)\n assert not exists(first(self.random_crop_sizes)), 'you should not need to randomly crop image during training for base unet, only for upsamplers - so pass in `random_crop_sizes = (None, 128, 256)` as example'\n\n # lowres augmentation noise schedule\n\n self.lowres_noise_schedule = GaussianDiffusionContinuousTimes(noise_schedule = lowres_noise_schedule)\n\n # ddpm objectives - predicting noise by default\n\n self.pred_objectives = cast_tuple(pred_objectives, num_unets)\n\n # get text encoder\n\n self.text_encoder_name = text_encoder_name\n self.text_embed_dim = default(text_embed_dim, lambda: get_encoded_dim(text_encoder_name))\n\n self.encode_text = partial(t5_encode_text, name = text_encoder_name)\n\n # construct unets\n\n self.unets = nn.ModuleList([])\n\n self.unet_being_trained_index = -1 # keeps track of which unet is being trained at the 
moment\n self.only_train_unet_number = only_train_unet_number\n\n for ind, one_unet in enumerate(unets):\n assert isinstance(one_unet, (Unet, Unet3D, NullUnet))\n is_first = ind == 0\n\n one_unet = one_unet.cast_model_parameters(\n lowres_cond = not is_first,\n cond_on_text = self.condition_on_text,\n text_embed_dim = self.text_embed_dim if self.condition_on_text else None,\n channels = self.channels,\n channels_out = self.channels\n )\n\n self.unets.append(one_unet)\n\n # unet image sizes\n\n image_sizes = cast_tuple(image_sizes)\n self.image_sizes = image_sizes\n\n assert num_unets == len(image_sizes), f'you did not supply the correct number of u-nets ({len(unets)}) for resolutions {image_sizes}'\n\n self.sample_channels = cast_tuple(self.channels, num_unets)\n\n # determine whether we are training on images or video\n\n is_video = any([isinstance(unet, Unet3D) for unet in self.unets])\n self.is_video = is_video\n\n self.right_pad_dims_to_datatype = partial(rearrange, pattern = ('b -> b 1 1 1' if not is_video else 'b -> b 1 1 1 1'))\n\n self.resize_to = resize_video_to if is_video else resize_image_to\n self.resize_to = partial(self.resize_to, mode = resize_mode)\n\n # temporal interpolation\n\n temporal_downsample_factor = cast_tuple(temporal_downsample_factor, num_unets)\n self.temporal_downsample_factor = temporal_downsample_factor\n\n self.resize_cond_video_frames = resize_cond_video_frames\n self.temporal_downsample_divisor = temporal_downsample_factor[0]\n\n assert temporal_downsample_factor[-1] == 1, 'downsample factor of last stage must be 1'\n assert tuple(sorted(temporal_downsample_factor, reverse = True)) == temporal_downsample_factor, 'temporal downsample factor must be in order of descending'\n\n # cascading ddpm related stuff\n\n lowres_conditions = tuple(map(lambda t: t.lowres_cond, self.unets))\n assert lowres_conditions == (False, *((True,) * (num_unets - 1))), 'the first unet must be unconditioned (by low resolution image), and the rest of the unets must have `lowres_cond` set to True'\n\n self.lowres_sample_noise_level = lowres_sample_noise_level\n self.per_sample_random_aug_noise_level = per_sample_random_aug_noise_level\n\n # classifier free guidance\n\n self.cond_drop_prob = cond_drop_prob\n self.can_classifier_guidance = cond_drop_prob > 0.\n\n # normalize and unnormalize image functions\n\n self.normalize_img = normalize_neg_one_to_one if auto_normalize_img else identity\n self.unnormalize_img = unnormalize_zero_to_one if auto_normalize_img else identity\n self.input_image_range = (0. 
if auto_normalize_img else -1., 1.)\n\n # dynamic thresholding\n\n self.dynamic_thresholding = cast_tuple(dynamic_thresholding, num_unets)\n self.dynamic_thresholding_percentile = dynamic_thresholding_percentile\n\n # p2 loss weight\n\n self.p2_loss_weight_k = p2_loss_weight_k\n self.p2_loss_weight_gamma = cast_tuple(p2_loss_weight_gamma, num_unets)\n\n assert all([(gamma_value <= 2) for gamma_value in self.p2_loss_weight_gamma]), 'in paper, they noticed any gamma greater than 2 is harmful'\n\n # one temp parameter for keeping track of device\n\n self.register_buffer('_temp', torch.tensor([0.]), persistent = False)\n\n # default to device of unets passed in\n\n self.to(next(self.unets.parameters()).device)\n\n def force_unconditional_(self):\n self.condition_on_text = False\n self.unconditional = True\n\n for unet in self.unets:\n unet.cond_on_text = False\n\n @property\n def device(self):\n return self._temp.device\n\n def get_unet(self, unet_number):", "metadata": {"task_id": "lucidrains--imagen-pytorch/41", "ground_truth": " assert 0 < unet_number <= len(self.unets)\n index = unet_number - 1\n\n if isinstance(self.unets, nn.ModuleList):\n unets_list = [unet for unet in self.unets]\n delattr(self, 'unets')\n self.unets = unets_list\n\n if index != self.unet_being_trained_index:\n for unet_index, unet in enumerate(self.unets):\n unet.to(self.device if unet_index == index else 'cpu')\n\n self.unet_being_trained_index = index\n return self.unets[index]\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_pytorch.py"], "context_start_lineno": 1629, "lineno": 1985, "function_name": "get_unet"}, "groundtruth": " assert 0 < unet_number <= len(self.unets)\n index = unet_number - 1\n\n if isinstance(self.unets, nn.ModuleList):\n unets_list = [unet for unet in self.unets]\n delattr(self, 'unets')\n self.unets = unets_list\n\n if index != self.unet_being_trained_index:\n for unet_index, unet in enumerate(self.unets):\n unet.to(self.device if unet_index == index else 'cpu')\n\n self.unet_being_trained_index = index\n return self.unets[index]\n"} +{"prompt": "import json\nfrom pydantic import BaseModel, validator, root_validator\nfrom typing import List, Iterable, Optional, Union, Tuple, Dict, Any\nfrom enum import Enum\n\nfrom imagen_pytorch.imagen_pytorch import Imagen, Unet, Unet3D, NullUnet\nfrom imagen_pytorch.trainer import ImagenTrainer\nfrom imagen_pytorch.elucidated_imagen import ElucidatedImagen\nfrom imagen_pytorch.t5 import DEFAULT_T5_NAME, get_encoded_dim\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef default(val, d):\n return val if exists(val) else d\n\ndef ListOrTuple(inner_type):\n return Union[List[inner_type], Tuple[inner_type]]\n\ndef SingleOrList(inner_type):\n return Union[inner_type, ListOrTuple(inner_type)]\n\n# noise schedule\n\nclass NoiseSchedule(Enum):\n cosine = 'cosine'\n linear = 'linear'\n\nclass AllowExtraBaseModel(BaseModel):\n class Config:\n extra = \"allow\"\n use_enum_values = True\n\n# imagen pydantic classes\n\nclass NullUnetConfig(BaseModel):\n is_null: bool\n\n def create(self):\n return NullUnet()\n\nclass UnetConfig(AllowExtraBaseModel):\n dim: int\n dim_mults: ListOrTuple(int)\n text_embed_dim: int = get_encoded_dim(DEFAULT_T5_NAME)\n cond_dim: int = None\n channels: int = 3\n attn_dim_head: int = 32\n attn_heads: int = 16\n\n def create(self):\n return Unet(**self.dict())\n\nclass Unet3DConfig(AllowExtraBaseModel):\n dim: int\n dim_mults: ListOrTuple(int)\n text_embed_dim: int = 
get_encoded_dim(DEFAULT_T5_NAME)\n cond_dim: int = None\n channels: int = 3\n attn_dim_head: int = 32\n attn_heads: int = 16\n\n def create(self):\n return Unet3D(**self.dict())\n\nclass ImagenConfig(AllowExtraBaseModel):\n unets: ListOrTuple(Union[UnetConfig, Unet3DConfig, NullUnetConfig])\n image_sizes: ListOrTuple(int)\n video: bool = False\n timesteps: SingleOrList(int) = 1000\n noise_schedules: SingleOrList(NoiseSchedule) = 'cosine'\n text_encoder_name: str = DEFAULT_T5_NAME\n channels: int = 3\n loss_type: str = 'l2'\n cond_drop_prob: float = 0.5\n\n @validator('image_sizes')\n def check_image_sizes(cls, image_sizes, values):", "metadata": {"task_id": "lucidrains--imagen-pytorch/42", "ground_truth": " unets = values.get('unets')\n if len(image_sizes) != len(unets):\n raise ValueError(f'image sizes length {len(image_sizes)} must be equivalent to the number of unets {len(unets)}')\n return image_sizes\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "configs.py"], "context_start_lineno": 0, "lineno": 80, "function_name": "check_image_sizes"}, "groundtruth": " unets = values.get('unets')\n if len(image_sizes) != len(unets):\n raise ValueError(f'image sizes length {len(image_sizes)} must be equivalent to the number of unets {len(unets)}')\n return image_sizes\n"} +{"prompt": "import json\nfrom pydantic import BaseModel, validator, root_validator\nfrom typing import List, Iterable, Optional, Union, Tuple, Dict, Any\nfrom enum import Enum\n\nfrom imagen_pytorch.imagen_pytorch import Imagen, Unet, Unet3D, NullUnet\nfrom imagen_pytorch.trainer import ImagenTrainer\nfrom imagen_pytorch.elucidated_imagen import ElucidatedImagen\nfrom imagen_pytorch.t5 import DEFAULT_T5_NAME, get_encoded_dim\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef default(val, d):\n return val if exists(val) else d\n\ndef ListOrTuple(inner_type):\n return Union[List[inner_type], Tuple[inner_type]]\n\ndef SingleOrList(inner_type):\n return Union[inner_type, ListOrTuple(inner_type)]\n\n# noise schedule\n\nclass NoiseSchedule(Enum):\n cosine = 'cosine'\n linear = 'linear'\n\nclass AllowExtraBaseModel(BaseModel):\n class Config:\n extra = \"allow\"\n use_enum_values = True\n\n# imagen pydantic classes\n\nclass NullUnetConfig(BaseModel):\n is_null: bool\n\n def create(self):\n return NullUnet()\n\nclass UnetConfig(AllowExtraBaseModel):\n dim: int\n dim_mults: ListOrTuple(int)\n text_embed_dim: int = get_encoded_dim(DEFAULT_T5_NAME)\n cond_dim: int = None\n channels: int = 3\n attn_dim_head: int = 32\n attn_heads: int = 16\n\n def create(self):\n return Unet(**self.dict())\n\nclass Unet3DConfig(AllowExtraBaseModel):\n dim: int\n dim_mults: ListOrTuple(int)\n text_embed_dim: int = get_encoded_dim(DEFAULT_T5_NAME)\n cond_dim: int = None\n channels: int = 3\n attn_dim_head: int = 32\n attn_heads: int = 16\n\n def create(self):\n return Unet3D(**self.dict())\n\nclass ImagenConfig(AllowExtraBaseModel):\n unets: ListOrTuple(Union[UnetConfig, Unet3DConfig, NullUnetConfig])\n image_sizes: ListOrTuple(int)\n video: bool = False\n timesteps: SingleOrList(int) = 1000\n noise_schedules: SingleOrList(NoiseSchedule) = 'cosine'\n text_encoder_name: str = DEFAULT_T5_NAME\n channels: int = 3\n loss_type: str = 'l2'\n cond_drop_prob: float = 0.5\n\n @validator('image_sizes')\n def check_image_sizes(cls, image_sizes, values):\n unets = values.get('unets')\n if len(image_sizes) != len(unets):\n raise ValueError(f'image sizes length {len(image_sizes)} must be equivalent to the number of unets 
{len(unets)}')\n return image_sizes\n\n def create(self):", "metadata": {"task_id": "lucidrains--imagen-pytorch/43", "ground_truth": " decoder_kwargs = self.dict()\n unets_kwargs = decoder_kwargs.pop('unets')\n is_video = decoder_kwargs.pop('video', False)\n\n unets = []\n\n for unet, unet_kwargs in zip(self.unets, unets_kwargs):\n if isinstance(unet, NullUnetConfig):\n unet_klass = NullUnet\n elif is_video:\n unet_klass = Unet3D\n else:\n unet_klass = Unet\n\n unets.append(unet_klass(**unet_kwargs))\n\n imagen = Imagen(unets, **decoder_kwargs)\n\n imagen._config = self.dict().copy()\n return imagen\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "configs.py"], "context_start_lineno": 0, "lineno": 86, "function_name": "create"}, "groundtruth": " decoder_kwargs = self.dict()\n unets_kwargs = decoder_kwargs.pop('unets')\n is_video = decoder_kwargs.pop('video', False)\n\n unets = []\n\n for unet, unet_kwargs in zip(self.unets, unets_kwargs):\n if isinstance(unet, NullUnetConfig):\n unet_klass = NullUnet\n elif is_video:\n unet_klass = Unet3D\n else:\n unet_klass = Unet\n\n unets.append(unet_klass(**unet_kwargs))\n\n imagen = Imagen(unets, **decoder_kwargs)\n\n imagen._config = self.dict().copy()\n return imagen\n"} +{"prompt": "from pathlib import Path\nfrom functools import partial\n\nimport torch\nfrom torch import nn\nfrom torch.utils.data import Dataset, DataLoader\nfrom torchvision import transforms as T, utils\nimport torch.nn.functional as F\nfrom imagen_pytorch import t5\nfrom torch.nn.utils.rnn import pad_sequence\n\nfrom PIL import Image\n\nfrom datasets.utils.file_utils import get_datasets_user_agent\nimport io\nimport urllib\n\nUSER_AGENT = get_datasets_user_agent()\n\n# helpers functions\n\ndef exists(val):\n return val is not None\n\ndef cycle(dl):", "metadata": {"task_id": "lucidrains--imagen-pytorch/44", "ground_truth": " while True:\n for data in dl:\n yield data\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "data.py"], "context_start_lineno": 0, "lineno": 25, "function_name": "cycle"}, "groundtruth": " while True:\n for data in dl:\n yield data\n"} +{"prompt": "import math\nimport copy\nimport operator\nimport functools\nfrom typing import List\nfrom tqdm.auto import tqdm\nfrom functools import partial, wraps\nfrom contextlib import contextmanager, nullcontext\nfrom collections import namedtuple\nfrom pathlib import Path\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, einsum\n\nfrom einops import rearrange, repeat, reduce, pack, unpack\nfrom einops.layers.torch import Rearrange, Reduce\nfrom einops_exts import rearrange_many, repeat_many, check_shape\nfrom einops_exts.torch import EinopsToAndFrom\n\nfrom imagen_pytorch.t5 import t5_encode_text, get_encoded_dim, DEFAULT_T5_NAME\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef identity(t, *args, **kwargs):\n return t\n\ndef first(arr, d = None):\n if len(arr) == 0:\n return d\n return arr[0]\n\ndef divisible_by(numer, denom):\n return (numer % denom) == 0\n\ndef maybe(fn):\n @wraps(fn)\n def inner(x):\n if not exists(x):\n return x\n return fn(x)\n return inner\n\ndef once(fn):", "metadata": {"task_id": "lucidrains--imagen-pytorch/45", "ground_truth": " called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "imagen_video.py"], "context_start_lineno": 0, "lineno": 47, "function_name": "once"}, 
"groundtruth": " called = False\n @wraps(fn)\n def inner(x):\n nonlocal called\n if called:\n return\n called = True\n return fn(x)\n return inner\n"} +{"prompt": "import torch\nimport transformers\nfrom typing import List\nfrom transformers import T5Tokenizer, T5EncoderModel, T5Config\nfrom einops import rearrange\n\ntransformers.logging.set_verbosity_error()\n\ndef exists(val):\n return val is not None\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\n# config\n\nMAX_LENGTH = 256\n\nDEFAULT_T5_NAME = 'google/t5-v1_1-base'\n\nT5_CONFIGS = {}\n\n# singleton globals\n\ndef get_tokenizer(name):\n tokenizer = T5Tokenizer.from_pretrained(name, model_max_length=MAX_LENGTH)\n return tokenizer\n\ndef get_model(name):\n model = T5EncoderModel.from_pretrained(name)\n return model\n\ndef get_model_and_tokenizer(name):\n global T5_CONFIGS\n\n if name not in T5_CONFIGS:\n T5_CONFIGS[name] = dict()\n if \"model\" not in T5_CONFIGS[name]:\n T5_CONFIGS[name][\"model\"] = get_model(name)\n if \"tokenizer\" not in T5_CONFIGS[name]:\n T5_CONFIGS[name][\"tokenizer\"] = get_tokenizer(name)\n\n return T5_CONFIGS[name]['model'], T5_CONFIGS[name]['tokenizer']\n\ndef get_encoded_dim(name):", "metadata": {"task_id": "lucidrains--imagen-pytorch/46", "ground_truth": " if name not in T5_CONFIGS:\n # avoids loading the model if we only want to get the dim\n config = T5Config.from_pretrained(name)\n T5_CONFIGS[name] = dict(config=config)\n elif \"config\" in T5_CONFIGS[name]:\n config = T5_CONFIGS[name][\"config\"]\n elif \"model\" in T5_CONFIGS[name]:\n config = T5_CONFIGS[name][\"model\"].config\n else:\n assert False\n return config.d_model\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "t5.py"], "context_start_lineno": 0, "lineno": 47, "function_name": "get_encoded_dim"}, "groundtruth": " if name not in T5_CONFIGS:\n # avoids loading the model if we only want to get the dim\n config = T5Config.from_pretrained(name)\n T5_CONFIGS[name] = dict(config=config)\n elif \"config\" in T5_CONFIGS[name]:\n config = T5_CONFIGS[name][\"config\"]\n elif \"model\" in T5_CONFIGS[name]:\n config = T5_CONFIGS[name][\"model\"].config\n else:\n assert False\n return config.d_model\n"} +{"prompt": "import os\nimport time\nimport copy\nfrom pathlib import Path\nfrom math import ceil\nfrom contextlib import contextmanager, nullcontext\nfrom functools import partial, wraps\nfrom collections.abc import Iterable\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.utils.data import random_split, DataLoader\nfrom torch.optim import Adam\nfrom lion_pytorch import Lion\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR\nfrom torch.cuda.amp import autocast, GradScaler\n\nimport pytorch_warmup as warmup\n\nfrom imagen_pytorch.imagen_pytorch import Imagen, NullUnet\nfrom imagen_pytorch.elucidated_imagen import ElucidatedImagen\nfrom imagen_pytorch.data import cycle\n\nfrom imagen_pytorch.version import __version__\nfrom packaging import version\n\nimport numpy as np\n\nfrom ema_pytorch import EMA\n\nfrom accelerate import Accelerator, DistributedType, DistributedDataParallelKwargs\n\nfrom fsspec.core import url_to_fs\nfrom fsspec.implementations.local import LocalFileSystem\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef default(val, d):", "metadata": {"task_id": "lucidrains--imagen-pytorch/47", "ground_truth": " if exists(val):\n return val\n return d() if callable(d) else d\n", "fpath_tuple": 
["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 0, "lineno": 42, "function_name": "default"}, "groundtruth": " if exists(val):\n return val\n return d() if callable(d) else d\n"} +{"prompt": "import os\nimport time\nimport copy\nfrom pathlib import Path\nfrom math import ceil\nfrom contextlib import contextmanager, nullcontext\nfrom functools import partial, wraps\nfrom collections.abc import Iterable\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.utils.data import random_split, DataLoader\nfrom torch.optim import Adam\nfrom lion_pytorch import Lion\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR\nfrom torch.cuda.amp import autocast, GradScaler\n\nimport pytorch_warmup as warmup\n\nfrom imagen_pytorch.imagen_pytorch import Imagen, NullUnet\nfrom imagen_pytorch.elucidated_imagen import ElucidatedImagen\nfrom imagen_pytorch.data import cycle\n\nfrom imagen_pytorch.version import __version__\nfrom packaging import version\n\nimport numpy as np\n\nfrom ema_pytorch import EMA\n\nfrom accelerate import Accelerator, DistributedType, DistributedDataParallelKwargs\n\nfrom fsspec.core import url_to_fs\nfrom fsspec.implementations.local import LocalFileSystem\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = 1):", "metadata": {"task_id": "lucidrains--imagen-pytorch/48", "ground_truth": " if isinstance(val, list):\n val = tuple(val)\n\n return val if isinstance(val, tuple) else ((val,) * length)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 0, "lineno": 47, "function_name": "cast_tuple"}, "groundtruth": " if isinstance(val, list):\n val = tuple(val)\n\n return val if isinstance(val, tuple) else ((val,) * length)\n"} +{"prompt": "import os\nimport time\nimport copy\nfrom pathlib import Path\nfrom math import ceil\nfrom contextlib import contextmanager, nullcontext\nfrom functools import partial, wraps\nfrom collections.abc import Iterable\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.utils.data import random_split, DataLoader\nfrom torch.optim import Adam\nfrom lion_pytorch import Lion\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR\nfrom torch.cuda.amp import autocast, GradScaler\n\nimport pytorch_warmup as warmup\n\nfrom imagen_pytorch.imagen_pytorch import Imagen, NullUnet\nfrom imagen_pytorch.elucidated_imagen import ElucidatedImagen\nfrom imagen_pytorch.data import cycle\n\nfrom imagen_pytorch.version import __version__\nfrom packaging import version\n\nimport numpy as np\n\nfrom ema_pytorch import EMA\n\nfrom accelerate import Accelerator, DistributedType, DistributedDataParallelKwargs\n\nfrom fsspec.core import url_to_fs\nfrom fsspec.implementations.local import LocalFileSystem\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = 1):\n if isinstance(val, list):\n val = tuple(val)\n\n return val if isinstance(val, tuple) else ((val,) * length)\n\ndef find_first(fn, arr):\n for ind, el in enumerate(arr):\n if fn(el):\n return ind\n return -1\n\ndef pick_and_pop(keys, d):\n values = list(map(lambda key: d.pop(key), keys))\n return dict(zip(keys, values))\n\ndef group_dict_by_key(cond, d):", "metadata": {"task_id": 
"lucidrains--imagen-pytorch/49", "ground_truth": " return_val = [dict(),dict()]\n for key in d.keys():\n match = bool(cond(key))\n ind = int(not match)\n return_val[ind][key] = d[key]\n return (*return_val,)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 0, "lineno": 63, "function_name": "group_dict_by_key"}, "groundtruth": " return_val = [dict(),dict()]\n for key in d.keys():\n match = bool(cond(key))\n ind = int(not match)\n return_val[ind][key] = d[key]\n return (*return_val,)\n"} +{"prompt": "import os\nimport time\nimport copy\nfrom pathlib import Path\nfrom math import ceil\nfrom contextlib import contextmanager, nullcontext\nfrom functools import partial, wraps\nfrom collections.abc import Iterable\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.utils.data import random_split, DataLoader\nfrom torch.optim import Adam\nfrom lion_pytorch import Lion\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR\nfrom torch.cuda.amp import autocast, GradScaler\n\nimport pytorch_warmup as warmup\n\nfrom imagen_pytorch.imagen_pytorch import Imagen, NullUnet\nfrom imagen_pytorch.elucidated_imagen import ElucidatedImagen\nfrom imagen_pytorch.data import cycle\n\nfrom imagen_pytorch.version import __version__\nfrom packaging import version\n\nimport numpy as np\n\nfrom ema_pytorch import EMA\n\nfrom accelerate import Accelerator, DistributedType, DistributedDataParallelKwargs\n\nfrom fsspec.core import url_to_fs\nfrom fsspec.implementations.local import LocalFileSystem\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = 1):\n if isinstance(val, list):\n val = tuple(val)\n\n return val if isinstance(val, tuple) else ((val,) * length)\n\ndef find_first(fn, arr):\n for ind, el in enumerate(arr):\n if fn(el):\n return ind\n return -1\n\ndef pick_and_pop(keys, d):\n values = list(map(lambda key: d.pop(key), keys))\n return dict(zip(keys, values))\n\ndef group_dict_by_key(cond, d):\n return_val = [dict(),dict()]\n for key in d.keys():\n match = bool(cond(key))\n ind = int(not match)\n return_val[ind][key] = d[key]\n return (*return_val,)\n\ndef string_begins_with(prefix, str):\n return str.startswith(prefix)\n\ndef group_by_key_prefix(prefix, d):\n return group_dict_by_key(partial(string_begins_with, prefix), d)\n\ndef groupby_prefix_and_trim(prefix, d):", "metadata": {"task_id": "lucidrains--imagen-pytorch/50", "ground_truth": " kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)\n kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))\n return kwargs_without_prefix, kwargs\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 0, "lineno": 77, "function_name": "groupby_prefix_and_trim"}, "groundtruth": " kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)\n kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))\n return kwargs_without_prefix, kwargs\n"} +{"prompt": "import os\nimport time\nimport copy\nfrom pathlib import Path\nfrom math import ceil\nfrom contextlib import contextmanager, nullcontext\nfrom functools import partial, wraps\nfrom collections.abc import Iterable\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as 
F\nfrom torch.utils.data import random_split, DataLoader\nfrom torch.optim import Adam\nfrom lion_pytorch import Lion\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR\nfrom torch.cuda.amp import autocast, GradScaler\n\nimport pytorch_warmup as warmup\n\nfrom imagen_pytorch.imagen_pytorch import Imagen, NullUnet\nfrom imagen_pytorch.elucidated_imagen import ElucidatedImagen\nfrom imagen_pytorch.data import cycle\n\nfrom imagen_pytorch.version import __version__\nfrom packaging import version\n\nimport numpy as np\n\nfrom ema_pytorch import EMA\n\nfrom accelerate import Accelerator, DistributedType, DistributedDataParallelKwargs\n\nfrom fsspec.core import url_to_fs\nfrom fsspec.implementations.local import LocalFileSystem\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = 1):\n if isinstance(val, list):\n val = tuple(val)\n\n return val if isinstance(val, tuple) else ((val,) * length)\n\ndef find_first(fn, arr):\n for ind, el in enumerate(arr):\n if fn(el):\n return ind\n return -1\n\ndef pick_and_pop(keys, d):\n values = list(map(lambda key: d.pop(key), keys))\n return dict(zip(keys, values))\n\ndef group_dict_by_key(cond, d):\n return_val = [dict(),dict()]\n for key in d.keys():\n match = bool(cond(key))\n ind = int(not match)\n return_val[ind][key] = d[key]\n return (*return_val,)\n\ndef string_begins_with(prefix, str):\n return str.startswith(prefix)\n\ndef group_by_key_prefix(prefix, d):\n return group_dict_by_key(partial(string_begins_with, prefix), d)\n\ndef groupby_prefix_and_trim(prefix, d):\n kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)\n kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))\n return kwargs_without_prefix, kwargs\n\ndef num_to_groups(num, divisor):", "metadata": {"task_id": "lucidrains--imagen-pytorch/51", "ground_truth": " groups = num // divisor\n remainder = num % divisor\n arr = [divisor] * groups\n if remainder > 0:\n arr.append(remainder)\n return arr\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 0, "lineno": 82, "function_name": "num_to_groups"}, "groundtruth": " groups = num // divisor\n remainder = num % divisor\n arr = [divisor] * groups\n if remainder > 0:\n arr.append(remainder)\n return arr\n"} +{"prompt": "import os\nimport time\nimport copy\nfrom pathlib import Path\nfrom math import ceil\nfrom contextlib import contextmanager, nullcontext\nfrom functools import partial, wraps\nfrom collections.abc import Iterable\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.utils.data import random_split, DataLoader\nfrom torch.optim import Adam\nfrom lion_pytorch import Lion\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR\nfrom torch.cuda.amp import autocast, GradScaler\n\nimport pytorch_warmup as warmup\n\nfrom imagen_pytorch.imagen_pytorch import Imagen, NullUnet\nfrom imagen_pytorch.elucidated_imagen import ElucidatedImagen\nfrom imagen_pytorch.data import cycle\n\nfrom imagen_pytorch.version import __version__\nfrom packaging import version\n\nimport numpy as np\n\nfrom ema_pytorch import EMA\n\nfrom accelerate import Accelerator, DistributedType, DistributedDataParallelKwargs\n\nfrom fsspec.core import url_to_fs\nfrom fsspec.implementations.local import LocalFileSystem\n\n# helper functions\n\ndef 
exists(val):\n return val is not None\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = 1):\n if isinstance(val, list):\n val = tuple(val)\n\n return val if isinstance(val, tuple) else ((val,) * length)\n\ndef find_first(fn, arr):\n for ind, el in enumerate(arr):\n if fn(el):\n return ind\n return -1\n\ndef pick_and_pop(keys, d):\n values = list(map(lambda key: d.pop(key), keys))\n return dict(zip(keys, values))\n\ndef group_dict_by_key(cond, d):\n return_val = [dict(),dict()]\n for key in d.keys():\n match = bool(cond(key))\n ind = int(not match)\n return_val[ind][key] = d[key]\n return (*return_val,)\n\ndef string_begins_with(prefix, str):\n return str.startswith(prefix)\n\ndef group_by_key_prefix(prefix, d):\n return group_dict_by_key(partial(string_begins_with, prefix), d)\n\ndef groupby_prefix_and_trim(prefix, d):\n kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)\n kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))\n return kwargs_without_prefix, kwargs\n\ndef num_to_groups(num, divisor):\n groups = num // divisor\n remainder = num % divisor\n arr = [divisor] * groups\n if remainder > 0:\n arr.append(remainder)\n return arr\n\n# url to fs, bucket, path - for checkpointing to cloud\n\ndef url_to_bucket(url):\n if '://' not in url:\n return url\n\n _, suffix = url.split('://')\n\n if prefix in {'gs', 's3'}:\n return suffix.split('/')[0]\n else:\n raise ValueError(f'storage type prefix \"{prefix}\" is not supported yet')\n\n# decorators\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef cast_torch_tensor(fn, cast_fp16 = False):\n @wraps(fn)\n def inner(model, *args, **kwargs):", "metadata": {"task_id": "lucidrains--imagen-pytorch/52", "ground_truth": " device = kwargs.pop('_device', model.device)\n cast_device = kwargs.pop('_cast_device', True)\n\n should_cast_fp16 = cast_fp16 and model.cast_half_at_training\n\n kwargs_keys = kwargs.keys()\n all_args = (*args, *kwargs.values())\n split_kwargs_index = len(all_args) - len(kwargs_keys)\n all_args = tuple(map(lambda t: torch.from_numpy(t) if exists(t) and isinstance(t, np.ndarray) else t, all_args))\n\n if cast_device:\n all_args = tuple(map(lambda t: t.to(device) if exists(t) and isinstance(t, torch.Tensor) else t, all_args))\n\n if should_cast_fp16:\n all_args = tuple(map(lambda t: t.half() if exists(t) and isinstance(t, torch.Tensor) and t.dtype != torch.bool else t, all_args))\n\n args, kwargs_values = all_args[:split_kwargs_index], all_args[split_kwargs_index:]\n kwargs = dict(tuple(zip(kwargs_keys, kwargs_values)))\n\n out = fn(model, *args, **kwargs)\n return out\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 0, "lineno": 116, "function_name": "inner"}, "groundtruth": " device = kwargs.pop('_device', model.device)\n cast_device = kwargs.pop('_cast_device', True)\n\n should_cast_fp16 = cast_fp16 and model.cast_half_at_training\n\n kwargs_keys = kwargs.keys()\n all_args = (*args, *kwargs.values())\n split_kwargs_index = len(all_args) - len(kwargs_keys)\n all_args = tuple(map(lambda t: torch.from_numpy(t) if exists(t) and isinstance(t, np.ndarray) else t, all_args))\n\n if cast_device:\n all_args = tuple(map(lambda t: t.to(device) if exists(t) and isinstance(t, 
torch.Tensor) else t, all_args))\n\n if should_cast_fp16:\n all_args = tuple(map(lambda t: t.half() if exists(t) and isinstance(t, torch.Tensor) and t.dtype != torch.bool else t, all_args))\n\n args, kwargs_values = all_args[:split_kwargs_index], all_args[split_kwargs_index:]\n kwargs = dict(tuple(zip(kwargs_keys, kwargs_values)))\n\n out = fn(model, *args, **kwargs)\n return out\n"} +{"prompt": "import os\nimport time\nimport copy\nfrom pathlib import Path\nfrom math import ceil\nfrom contextlib import contextmanager, nullcontext\nfrom functools import partial, wraps\nfrom collections.abc import Iterable\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.utils.data import random_split, DataLoader\nfrom torch.optim import Adam\nfrom lion_pytorch import Lion\nfrom torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR\nfrom torch.cuda.amp import autocast, GradScaler\n\nimport pytorch_warmup as warmup\n\nfrom imagen_pytorch.imagen_pytorch import Imagen, NullUnet\nfrom imagen_pytorch.elucidated_imagen import ElucidatedImagen\nfrom imagen_pytorch.data import cycle\n\nfrom imagen_pytorch.version import __version__\nfrom packaging import version\n\nimport numpy as np\n\nfrom ema_pytorch import EMA\n\nfrom accelerate import Accelerator, DistributedType, DistributedDataParallelKwargs\n\nfrom fsspec.core import url_to_fs\nfrom fsspec.implementations.local import LocalFileSystem\n\n# helper functions\n\ndef exists(val):\n return val is not None\n\ndef default(val, d):\n if exists(val):\n return val\n return d() if callable(d) else d\n\ndef cast_tuple(val, length = 1):\n if isinstance(val, list):\n val = tuple(val)\n\n return val if isinstance(val, tuple) else ((val,) * length)\n\ndef find_first(fn, arr):\n for ind, el in enumerate(arr):\n if fn(el):\n return ind\n return -1\n\ndef pick_and_pop(keys, d):\n values = list(map(lambda key: d.pop(key), keys))\n return dict(zip(keys, values))\n\ndef group_dict_by_key(cond, d):\n return_val = [dict(),dict()]\n for key in d.keys():\n match = bool(cond(key))\n ind = int(not match)\n return_val[ind][key] = d[key]\n return (*return_val,)\n\ndef string_begins_with(prefix, str):\n return str.startswith(prefix)\n\ndef group_by_key_prefix(prefix, d):\n return group_dict_by_key(partial(string_begins_with, prefix), d)\n\ndef groupby_prefix_and_trim(prefix, d):\n kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)\n kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))\n return kwargs_without_prefix, kwargs\n\ndef num_to_groups(num, divisor):\n groups = num // divisor\n remainder = num % divisor\n arr = [divisor] * groups\n if remainder > 0:\n arr.append(remainder)\n return arr\n\n# url to fs, bucket, path - for checkpointing to cloud\n\ndef url_to_bucket(url):\n if '://' not in url:\n return url\n\n _, suffix = url.split('://')\n\n if prefix in {'gs', 's3'}:\n return suffix.split('/')[0]\n else:\n raise ValueError(f'storage type prefix \"{prefix}\" is not supported yet')\n\n# decorators\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef cast_torch_tensor(fn, cast_fp16 = False):\n @wraps(fn)\n def inner(model, *args, **kwargs):\n device = kwargs.pop('_device', model.device)\n cast_device = kwargs.pop('_cast_device', True)\n\n should_cast_fp16 = cast_fp16 and model.cast_half_at_training\n\n 
kwargs_keys = kwargs.keys()\n all_args = (*args, *kwargs.values())\n split_kwargs_index = len(all_args) - len(kwargs_keys)\n all_args = tuple(map(lambda t: torch.from_numpy(t) if exists(t) and isinstance(t, np.ndarray) else t, all_args))\n\n if cast_device:\n all_args = tuple(map(lambda t: t.to(device) if exists(t) and isinstance(t, torch.Tensor) else t, all_args))\n\n if should_cast_fp16:\n all_args = tuple(map(lambda t: t.half() if exists(t) and isinstance(t, torch.Tensor) and t.dtype != torch.bool else t, all_args))\n\n args, kwargs_values = all_args[:split_kwargs_index], all_args[split_kwargs_index:]\n kwargs = dict(tuple(zip(kwargs_keys, kwargs_values)))\n\n out = fn(model, *args, **kwargs)\n return out\n return inner\n\n# gradient accumulation functions\n\ndef split_iterable(it, split_size):\n accum = []\n for ind in range(ceil(len(it) / split_size)):\n start_index = ind * split_size\n accum.append(it[start_index: (start_index + split_size)])\n return accum\n\ndef split(t, split_size = None):\n if not exists(split_size):\n return t\n\n if isinstance(t, torch.Tensor):\n return t.split(split_size, dim = 0)\n\n if isinstance(t, Iterable):\n return split_iterable(t, split_size)\n\n return TypeError\n\ndef find_first(cond, arr):", "metadata": {"task_id": "lucidrains--imagen-pytorch/53", "ground_truth": " for el in arr:\n if cond(el):\n return el\n return None\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 0, "lineno": 161, "function_name": "find_first"}, "groundtruth": " for el in arr:\n if cond(el):\n return el\n return None\n"} +{"prompt": ", arr):\n for ind, el in enumerate(arr):\n if fn(el):\n return ind\n return -1\n\ndef pick_and_pop(keys, d):\n values = list(map(lambda key: d.pop(key), keys))\n return dict(zip(keys, values))\n\ndef group_dict_by_key(cond, d):\n return_val = [dict(),dict()]\n for key in d.keys():\n match = bool(cond(key))\n ind = int(not match)\n return_val[ind][key] = d[key]\n return (*return_val,)\n\ndef string_begins_with(prefix, str):\n return str.startswith(prefix)\n\ndef group_by_key_prefix(prefix, d):\n return group_dict_by_key(partial(string_begins_with, prefix), d)\n\ndef groupby_prefix_and_trim(prefix, d):\n kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d)\n kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items())))\n return kwargs_without_prefix, kwargs\n\ndef num_to_groups(num, divisor):\n groups = num // divisor\n remainder = num % divisor\n arr = [divisor] * groups\n if remainder > 0:\n arr.append(remainder)\n return arr\n\n# url to fs, bucket, path - for checkpointing to cloud\n\ndef url_to_bucket(url):\n if '://' not in url:\n return url\n\n _, suffix = url.split('://')\n\n if prefix in {'gs', 's3'}:\n return suffix.split('/')[0]\n else:\n raise ValueError(f'storage type prefix \"{prefix}\" is not supported yet')\n\n# decorators\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef cast_torch_tensor(fn, cast_fp16 = False):\n @wraps(fn)\n def inner(model, *args, **kwargs):\n device = kwargs.pop('_device', model.device)\n cast_device = kwargs.pop('_cast_device', True)\n\n should_cast_fp16 = cast_fp16 and model.cast_half_at_training\n\n kwargs_keys = kwargs.keys()\n all_args = (*args, *kwargs.values())\n split_kwargs_index = len(all_args) - len(kwargs_keys)\n 
all_args = tuple(map(lambda t: torch.from_numpy(t) if exists(t) and isinstance(t, np.ndarray) else t, all_args))\n\n if cast_device:\n all_args = tuple(map(lambda t: t.to(device) if exists(t) and isinstance(t, torch.Tensor) else t, all_args))\n\n if should_cast_fp16:\n all_args = tuple(map(lambda t: t.half() if exists(t) and isinstance(t, torch.Tensor) and t.dtype != torch.bool else t, all_args))\n\n args, kwargs_values = all_args[:split_kwargs_index], all_args[split_kwargs_index:]\n kwargs = dict(tuple(zip(kwargs_keys, kwargs_values)))\n\n out = fn(model, *args, **kwargs)\n return out\n return inner\n\n# gradient accumulation functions\n\ndef split_iterable(it, split_size):\n accum = []\n for ind in range(ceil(len(it) / split_size)):\n start_index = ind * split_size\n accum.append(it[start_index: (start_index + split_size)])\n return accum\n\ndef split(t, split_size = None):\n if not exists(split_size):\n return t\n\n if isinstance(t, torch.Tensor):\n return t.split(split_size, dim = 0)\n\n if isinstance(t, Iterable):\n return split_iterable(t, split_size)\n\n return TypeError\n\ndef find_first(cond, arr):\n for el in arr:\n if cond(el):\n return el\n return None\n\ndef split_args_and_kwargs(*args, split_size = None, **kwargs):\n all_args = (*args, *kwargs.values())\n len_all_args = len(all_args)\n first_tensor = find_first(lambda t: isinstance(t, torch.Tensor), all_args)\n assert exists(first_tensor)\n\n batch_size = len(first_tensor)\n split_size = default(split_size, batch_size)\n num_chunks = ceil(batch_size / split_size)\n\n dict_len = len(kwargs)\n dict_keys = kwargs.keys()\n split_kwargs_index = len_all_args - dict_len\n\n split_all_args = [split(arg, split_size = split_size) if exists(arg) and isinstance(arg, (torch.Tensor, Iterable)) else ((arg,) * num_chunks) for arg in all_args]\n chunk_sizes = num_to_groups(batch_size, split_size)\n\n for (chunk_size, *chunked_all_args) in tuple(zip(chunk_sizes, *split_all_args)):\n chunked_args, chunked_kwargs_values = chunked_all_args[:split_kwargs_index], chunked_all_args[split_kwargs_index:]\n chunked_kwargs = dict(tuple(zip(dict_keys, chunked_kwargs_values)))\n chunk_size_frac = chunk_size / batch_size\n yield chunk_size_frac, (chunked_args, chunked_kwargs)\n\n# imagen trainer\n\ndef imagen_sample_in_chunks(fn):\n @wraps(fn)\n def inner(self, *args, max_batch_size = None, **kwargs):\n if not exists(max_batch_size):\n return fn(self, *args, **kwargs)\n\n if self.imagen.unconditional:\n batch_size = kwargs.get('batch_size')\n batch_sizes = num_to_groups(batch_size, max_batch_size)\n outputs = [fn(self, *args, **{**kwargs, 'batch_size': sub_batch_size}) for sub_batch_size in batch_sizes]\n else:\n outputs = [fn(self, *chunked_args, **chunked_kwargs) for _, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs)]\n\n if isinstance(outputs[0], torch.Tensor):\n return torch.cat(outputs, dim = 0)\n\n return list(map(lambda t: torch.cat(t, dim = 0), list(zip(*outputs))))\n\n return inner\n\n\ndef restore_parts(state_dict_target, state_dict_from):\n for name, param in state_dict_from.items():\n\n if name not in state_dict_target:\n continue\n\n if param.size() == state_dict_target[name].size():\n state_dict_target[name].copy_(param)\n else:\n print(f\"layer {name}({param.size()} different than target: {state_dict_target[name].size()}\")\n\n return state_dict_target\n\n\nclass ImagenTrainer(nn.Module):\n locked = False\n\n def __init__(\n self,\n imagen = None,\n imagen_checkpoint_path = None,\n use_ema 
= True,\n lr = 1e-4,\n eps = 1e-8,\n beta1 = 0.9,\n beta2 = 0.99,\n max_grad_norm = None,\n group_wd_params = True,\n warmup_steps = None,\n cosine_decay_max_steps = None,\n only_train_unet_number = None,\n fp16 = False,\n precision = None,\n split_batches = True,\n dl_tuple_output_keywords_names = ('images', 'text_embeds', 'text_masks', 'cond_images'),\n verbose = True,\n split_valid_fraction = 0.025,\n split_valid_from_train = False,\n split_random_seed = 42,\n checkpoint_path = None,\n checkpoint_every = None,\n checkpoint_fs = None,\n fs_kwargs: dict = None,\n max_checkpoints_keep = 20,\n use_lion = False,\n **kwargs\n ):\n super().__init__()\n assert not ImagenTrainer.locked, 'ImagenTrainer can only be initialized once per process - for the sake of distributed training, you will now have to create a separate script to train each unet (or a script that accepts unet number as an argument)'\n assert exists(imagen) ^ exists(imagen_checkpoint_path), 'either imagen instance is passed into the trainer, or a checkpoint path that contains the imagen config'\n\n # determine filesystem, using fsspec, for saving to local filesystem or cloud\n\n self.fs = checkpoint_fs\n\n if not exists(self.fs):\n fs_kwargs = default(fs_kwargs, {})\n self.fs, _ = url_to_fs(default(checkpoint_path, './'), **fs_kwargs)\n\n assert isinstance(imagen, (Imagen, ElucidatedImagen))\n ema_kwargs, kwargs = groupby_prefix_and_trim('ema_', kwargs)\n\n # elucidated or not\n\n self.is_elucidated = isinstance(imagen, ElucidatedImagen)\n\n # create accelerator instance\n\n accelerate_kwargs, kwargs = groupby_prefix_and_trim('accelerate_', kwargs)\n\n assert not (fp16 and exists(precision)), 'either set fp16 = True or forward the precision (\"fp16\", \"bf16\") to Accelerator'\n accelerator_mixed_precision = default(precision, 'fp16' if fp16 else 'no')\n\n self.accelerator = Accelerator(**{\n 'split_batches': split_batches,\n 'mixed_precision': accelerator_mixed_precision,\n 'kwargs_handlers': [DistributedDataParallelKwargs(find_unused_parameters = True)]\n , **accelerate_kwargs})\n\n ImagenTrainer.locked = self.is_distributed\n\n # cast data to fp16 at training time if needed\n\n self.cast_half_at_training = accelerator_mixed_precision == 'fp16'\n\n # grad scaler must be managed outside of accelerator\n\n grad_scaler_enabled = fp16\n\n # imagen, unets and ema unets\n\n self.imagen = imagen\n self.num_unets = len(self.imagen.unets)\n\n self.use_ema = use_ema and self.is_main\n self.ema_unets = nn.ModuleList([])\n\n # keep track of what unet is being trained on\n # only going to allow 1 unet training at a time\n\n self.ema_unet_being_trained_index = -1 # keeps track of which ema unet is being trained on\n\n # data related functions\n\n self.train_dl_iter = None\n self.train_dl = None\n\n self.valid_dl_iter = None\n self.valid_dl = None\n\n self.dl_tuple_output_keywords_names = dl_tuple_output_keywords_names\n\n # auto splitting validation from training, if dataset is passed in\n\n self.split_valid_from_train = split_valid_from_train\n\n assert 0 <= split_valid_fraction <= 1, 'split valid fraction must be between 0 and 1'\n self.split_valid_fraction = split_valid_fraction\n self.split_random_seed = split_random_seed\n\n # be able to finely customize learning rate, weight decay\n # per unet\n\n lr, eps, warmup_steps, cosine_decay_max_steps = map(partial(cast_tuple, length = self.num_unets), (lr, eps, warmup_steps, cosine_decay_max_steps))\n\n for ind, (unet, unet_lr, unet_eps, unet_warmup_steps, unet_cosine_decay_max_steps) in 
enumerate(zip(self.imagen.unets, lr, eps, warmup_steps, cosine_decay_max_steps)):\n\n if use_lion:\n optimizer = Lion(\n unet.parameters(),\n lr = unet_lr,\n betas = (beta1, beta2),\n use_triton = True\n )\n else:\n optimizer = Adam(\n unet.parameters(),\n lr = unet_lr,\n eps = unet_eps,\n betas = (beta1, beta2),\n **kwargs\n )\n\n if self.use_ema:\n self.ema_unets.append(EMA(unet, **ema_kwargs))\n\n scaler = GradScaler(enabled = grad_scaler_enabled)\n\n scheduler = warmup_scheduler = None\n\n if exists(unet_cosine_decay_max_steps):\n scheduler = CosineAnnealingLR(optimizer, T_max = unet_cosine_decay_max_steps)\n\n if exists(unet_warmup_steps):\n warmup_scheduler = warmup.LinearWarmup(optimizer, warmup_period = unet_warmup_steps)\n\n if not exists(scheduler):\n scheduler = LambdaLR(optimizer, lr_lambda = lambda step: 1.0)\n\n # set on object\n\n setattr(self, f'optim{ind}', optimizer) # cannot use pytorch ModuleList for some reason with optimizers\n setattr(self, f'scaler{ind}', scaler)\n setattr(self, f'scheduler{ind}', scheduler)\n setattr(self, f'warmup{ind}', warmup_scheduler)\n\n # gradient clipping if needed\n\n self.max_grad_norm = max_grad_norm\n\n # step tracker and misc\n\n self.register_buffer('steps', torch.tensor([0] * self.num_unets))\n\n self.verbose = verbose\n\n # automatic set devices based on what accelerator decided\n\n self.imagen.to(self.device)\n self.to(self.device)\n\n # checkpointing\n\n assert not (exists(checkpoint_path) ^ exists(checkpoint_every))\n self.checkpoint_path = checkpoint_path\n self.checkpoint_every = checkpoint_every\n self.max_checkpoints_keep = max_checkpoints_keep\n\n self.can_checkpoint = self.is_local_main if isinstance(checkpoint_fs, LocalFileSystem) else self.is_main\n\n if exists(checkpoint_path) and self.can_checkpoint:\n bucket = url_to_bucket(checkpoint_path)\n\n if not self.fs.exists(bucket):\n self.fs.mkdir(bucket)\n\n self.load_from_checkpoint_folder()\n\n # only allowing training for unet\n\n self.only_train_unet_number = only_train_unet_number\n self.prepared = False\n\n\n def prepare(self):", "metadata": {"task_id": "lucidrains--imagen-pytorch/54", "ground_truth": " assert not self.prepared, f'The trainer is allready prepared'\n self.validate_and_set_unet_being_trained(self.only_train_unet_number)\n self.prepared = True\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 52, "lineno": 417, "function_name": "prepare"}, "groundtruth": " assert not self.prepared, f'The trainer is allready prepared'\n self.validate_and_set_unet_being_trained(self.only_train_unet_number)\n self.prepared = True\n"} +{"prompt": " = num % divisor\n arr = [divisor] * groups\n if remainder > 0:\n arr.append(remainder)\n return arr\n\n# url to fs, bucket, path - for checkpointing to cloud\n\ndef url_to_bucket(url):\n if '://' not in url:\n return url\n\n _, suffix = url.split('://')\n\n if prefix in {'gs', 's3'}:\n return suffix.split('/')[0]\n else:\n raise ValueError(f'storage type prefix \"{prefix}\" is not supported yet')\n\n# decorators\n\ndef eval_decorator(fn):\n def inner(model, *args, **kwargs):\n was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef cast_torch_tensor(fn, cast_fp16 = False):\n @wraps(fn)\n def inner(model, *args, **kwargs):\n device = kwargs.pop('_device', model.device)\n cast_device = kwargs.pop('_cast_device', True)\n\n should_cast_fp16 = cast_fp16 and model.cast_half_at_training\n\n 
kwargs_keys = kwargs.keys()\n all_args = (*args, *kwargs.values())\n split_kwargs_index = len(all_args) - len(kwargs_keys)\n all_args = tuple(map(lambda t: torch.from_numpy(t) if exists(t) and isinstance(t, np.ndarray) else t, all_args))\n\n if cast_device:\n all_args = tuple(map(lambda t: t.to(device) if exists(t) and isinstance(t, torch.Tensor) else t, all_args))\n\n if should_cast_fp16:\n all_args = tuple(map(lambda t: t.half() if exists(t) and isinstance(t, torch.Tensor) and t.dtype != torch.bool else t, all_args))\n\n args, kwargs_values = all_args[:split_kwargs_index], all_args[split_kwargs_index:]\n kwargs = dict(tuple(zip(kwargs_keys, kwargs_values)))\n\n out = fn(model, *args, **kwargs)\n return out\n return inner\n\n# gradient accumulation functions\n\ndef split_iterable(it, split_size):\n accum = []\n for ind in range(ceil(len(it) / split_size)):\n start_index = ind * split_size\n accum.append(it[start_index: (start_index + split_size)])\n return accum\n\ndef split(t, split_size = None):\n if not exists(split_size):\n return t\n\n if isinstance(t, torch.Tensor):\n return t.split(split_size, dim = 0)\n\n if isinstance(t, Iterable):\n return split_iterable(t, split_size)\n\n return TypeError\n\ndef find_first(cond, arr):\n for el in arr:\n if cond(el):\n return el\n return None\n\ndef split_args_and_kwargs(*args, split_size = None, **kwargs):\n all_args = (*args, *kwargs.values())\n len_all_args = len(all_args)\n first_tensor = find_first(lambda t: isinstance(t, torch.Tensor), all_args)\n assert exists(first_tensor)\n\n batch_size = len(first_tensor)\n split_size = default(split_size, batch_size)\n num_chunks = ceil(batch_size / split_size)\n\n dict_len = len(kwargs)\n dict_keys = kwargs.keys()\n split_kwargs_index = len_all_args - dict_len\n\n split_all_args = [split(arg, split_size = split_size) if exists(arg) and isinstance(arg, (torch.Tensor, Iterable)) else ((arg,) * num_chunks) for arg in all_args]\n chunk_sizes = num_to_groups(batch_size, split_size)\n\n for (chunk_size, *chunked_all_args) in tuple(zip(chunk_sizes, *split_all_args)):\n chunked_args, chunked_kwargs_values = chunked_all_args[:split_kwargs_index], chunked_all_args[split_kwargs_index:]\n chunked_kwargs = dict(tuple(zip(dict_keys, chunked_kwargs_values)))\n chunk_size_frac = chunk_size / batch_size\n yield chunk_size_frac, (chunked_args, chunked_kwargs)\n\n# imagen trainer\n\ndef imagen_sample_in_chunks(fn):\n @wraps(fn)\n def inner(self, *args, max_batch_size = None, **kwargs):\n if not exists(max_batch_size):\n return fn(self, *args, **kwargs)\n\n if self.imagen.unconditional:\n batch_size = kwargs.get('batch_size')\n batch_sizes = num_to_groups(batch_size, max_batch_size)\n outputs = [fn(self, *args, **{**kwargs, 'batch_size': sub_batch_size}) for sub_batch_size in batch_sizes]\n else:\n outputs = [fn(self, *chunked_args, **chunked_kwargs) for _, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs)]\n\n if isinstance(outputs[0], torch.Tensor):\n return torch.cat(outputs, dim = 0)\n\n return list(map(lambda t: torch.cat(t, dim = 0), list(zip(*outputs))))\n\n return inner\n\n\ndef restore_parts(state_dict_target, state_dict_from):\n for name, param in state_dict_from.items():\n\n if name not in state_dict_target:\n continue\n\n if param.size() == state_dict_target[name].size():\n state_dict_target[name].copy_(param)\n else:\n print(f\"layer {name}({param.size()} different than target: {state_dict_target[name].size()}\")\n\n return state_dict_target\n\n\nclass 
ImagenTrainer(nn.Module):\n locked = False\n\n def __init__(\n self,\n imagen = None,\n imagen_checkpoint_path = None,\n use_ema = True,\n lr = 1e-4,\n eps = 1e-8,\n beta1 = 0.9,\n beta2 = 0.99,\n max_grad_norm = None,\n group_wd_params = True,\n warmup_steps = None,\n cosine_decay_max_steps = None,\n only_train_unet_number = None,\n fp16 = False,\n precision = None,\n split_batches = True,\n dl_tuple_output_keywords_names = ('images', 'text_embeds', 'text_masks', 'cond_images'),\n verbose = True,\n split_valid_fraction = 0.025,\n split_valid_from_train = False,\n split_random_seed = 42,\n checkpoint_path = None,\n checkpoint_every = None,\n checkpoint_fs = None,\n fs_kwargs: dict = None,\n max_checkpoints_keep = 20,\n use_lion = False,\n **kwargs\n ):\n super().__init__()\n assert not ImagenTrainer.locked, 'ImagenTrainer can only be initialized once per process - for the sake of distributed training, you will now have to create a separate script to train each unet (or a script that accepts unet number as an argument)'\n assert exists(imagen) ^ exists(imagen_checkpoint_path), 'either imagen instance is passed into the trainer, or a checkpoint path that contains the imagen config'\n\n # determine filesystem, using fsspec, for saving to local filesystem or cloud\n\n self.fs = checkpoint_fs\n\n if not exists(self.fs):\n fs_kwargs = default(fs_kwargs, {})\n self.fs, _ = url_to_fs(default(checkpoint_path, './'), **fs_kwargs)\n\n assert isinstance(imagen, (Imagen, ElucidatedImagen))\n ema_kwargs, kwargs = groupby_prefix_and_trim('ema_', kwargs)\n\n # elucidated or not\n\n self.is_elucidated = isinstance(imagen, ElucidatedImagen)\n\n # create accelerator instance\n\n accelerate_kwargs, kwargs = groupby_prefix_and_trim('accelerate_', kwargs)\n\n assert not (fp16 and exists(precision)), 'either set fp16 = True or forward the precision (\"fp16\", \"bf16\") to Accelerator'\n accelerator_mixed_precision = default(precision, 'fp16' if fp16 else 'no')\n\n self.accelerator = Accelerator(**{\n 'split_batches': split_batches,\n 'mixed_precision': accelerator_mixed_precision,\n 'kwargs_handlers': [DistributedDataParallelKwargs(find_unused_parameters = True)]\n , **accelerate_kwargs})\n\n ImagenTrainer.locked = self.is_distributed\n\n # cast data to fp16 at training time if needed\n\n self.cast_half_at_training = accelerator_mixed_precision == 'fp16'\n\n # grad scaler must be managed outside of accelerator\n\n grad_scaler_enabled = fp16\n\n # imagen, unets and ema unets\n\n self.imagen = imagen\n self.num_unets = len(self.imagen.unets)\n\n self.use_ema = use_ema and self.is_main\n self.ema_unets = nn.ModuleList([])\n\n # keep track of what unet is being trained on\n # only going to allow 1 unet training at a time\n\n self.ema_unet_being_trained_index = -1 # keeps track of which ema unet is being trained on\n\n # data related functions\n\n self.train_dl_iter = None\n self.train_dl = None\n\n self.valid_dl_iter = None\n self.valid_dl = None\n\n self.dl_tuple_output_keywords_names = dl_tuple_output_keywords_names\n\n # auto splitting validation from training, if dataset is passed in\n\n self.split_valid_from_train = split_valid_from_train\n\n assert 0 <= split_valid_fraction <= 1, 'split valid fraction must be between 0 and 1'\n self.split_valid_fraction = split_valid_fraction\n self.split_random_seed = split_random_seed\n\n # be able to finely customize learning rate, weight decay\n # per unet\n\n lr, eps, warmup_steps, cosine_decay_max_steps = map(partial(cast_tuple, length = self.num_unets), (lr, eps, 
warmup_steps, cosine_decay_max_steps))\n\n for ind, (unet, unet_lr, unet_eps, unet_warmup_steps, unet_cosine_decay_max_steps) in enumerate(zip(self.imagen.unets, lr, eps, warmup_steps, cosine_decay_max_steps)):\n\n if use_lion:\n optimizer = Lion(\n unet.parameters(),\n lr = unet_lr,\n betas = (beta1, beta2),\n use_triton = True\n )\n else:\n optimizer = Adam(\n unet.parameters(),\n lr = unet_lr,\n eps = unet_eps,\n betas = (beta1, beta2),\n **kwargs\n )\n\n if self.use_ema:\n self.ema_unets.append(EMA(unet, **ema_kwargs))\n\n scaler = GradScaler(enabled = grad_scaler_enabled)\n\n scheduler = warmup_scheduler = None\n\n if exists(unet_cosine_decay_max_steps):\n scheduler = CosineAnnealingLR(optimizer, T_max = unet_cosine_decay_max_steps)\n\n if exists(unet_warmup_steps):\n warmup_scheduler = warmup.LinearWarmup(optimizer, warmup_period = unet_warmup_steps)\n\n if not exists(scheduler):\n scheduler = LambdaLR(optimizer, lr_lambda = lambda step: 1.0)\n\n # set on object\n\n setattr(self, f'optim{ind}', optimizer) # cannot use pytorch ModuleList for some reason with optimizers\n setattr(self, f'scaler{ind}', scaler)\n setattr(self, f'scheduler{ind}', scheduler)\n setattr(self, f'warmup{ind}', warmup_scheduler)\n\n # gradient clipping if needed\n\n self.max_grad_norm = max_grad_norm\n\n # step tracker and misc\n\n self.register_buffer('steps', torch.tensor([0] * self.num_unets))\n\n self.verbose = verbose\n\n # automatic set devices based on what accelerator decided\n\n self.imagen.to(self.device)\n self.to(self.device)\n\n # checkpointing\n\n assert not (exists(checkpoint_path) ^ exists(checkpoint_every))\n self.checkpoint_path = checkpoint_path\n self.checkpoint_every = checkpoint_every\n self.max_checkpoints_keep = max_checkpoints_keep\n\n self.can_checkpoint = self.is_local_main if isinstance(checkpoint_fs, LocalFileSystem) else self.is_main\n\n if exists(checkpoint_path) and self.can_checkpoint:\n bucket = url_to_bucket(checkpoint_path)\n\n if not self.fs.exists(bucket):\n self.fs.mkdir(bucket)\n\n self.load_from_checkpoint_folder()\n\n # only allowing training for unet\n\n self.only_train_unet_number = only_train_unet_number\n self.prepared = False\n\n\n def prepare(self):\n assert not self.prepared, f'The trainer is allready prepared'\n self.validate_and_set_unet_being_trained(self.only_train_unet_number)\n self.prepared = True\n # computed values\n\n @property\n def device(self):\n return self.accelerator.device\n\n @property\n def is_distributed(self):\n return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)\n\n @property\n def is_main(self):\n return self.accelerator.is_main_process\n\n @property\n def is_local_main(self):\n return self.accelerator.is_local_main_process\n\n @property\n def unwrapped_unet(self):\n return self.accelerator.unwrap_model(self.unet_being_trained)\n\n # optimizer helper functions\n\n def get_lr(self, unet_number):\n self.validate_unet_number(unet_number)\n unet_index = unet_number - 1\n\n optim = getattr(self, f'optim{unet_index}')\n\n return optim.param_groups[0]['lr']\n\n # function for allowing only one unet from being trained at a time\n\n def validate_and_set_unet_being_trained(self, unet_number = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/55", "ground_truth": " if exists(unet_number):\n self.validate_unet_number(unet_number)\n\n assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, 'you cannot only train on one unet at a time. 
you will need to save the trainer into a checkpoint, and resume training on a new unet'\n\n self.only_train_unet_number = unet_number\n self.imagen.only_train_unet_number = unet_number\n\n if not exists(unet_number):\n return\n\n self.wrap_unet(unet_number)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 83, "lineno": 455, "function_name": "validate_and_set_unet_being_trained"}, "groundtruth": " if exists(unet_number):\n self.validate_unet_number(unet_number)\n\n assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, 'you cannot only train on one unet at a time. you will need to save the trainer into a checkpoint, and resume training on a new unet'\n\n self.only_train_unet_number = unet_number\n self.imagen.only_train_unet_number = unet_number\n\n if not exists(unet_number):\n return\n\n self.wrap_unet(unet_number)\n"} +{"prompt": " was_training = model.training\n model.eval()\n out = fn(model, *args, **kwargs)\n model.train(was_training)\n return out\n return inner\n\ndef cast_torch_tensor(fn, cast_fp16 = False):\n @wraps(fn)\n def inner(model, *args, **kwargs):\n device = kwargs.pop('_device', model.device)\n cast_device = kwargs.pop('_cast_device', True)\n\n should_cast_fp16 = cast_fp16 and model.cast_half_at_training\n\n kwargs_keys = kwargs.keys()\n all_args = (*args, *kwargs.values())\n split_kwargs_index = len(all_args) - len(kwargs_keys)\n all_args = tuple(map(lambda t: torch.from_numpy(t) if exists(t) and isinstance(t, np.ndarray) else t, all_args))\n\n if cast_device:\n all_args = tuple(map(lambda t: t.to(device) if exists(t) and isinstance(t, torch.Tensor) else t, all_args))\n\n if should_cast_fp16:\n all_args = tuple(map(lambda t: t.half() if exists(t) and isinstance(t, torch.Tensor) and t.dtype != torch.bool else t, all_args))\n\n args, kwargs_values = all_args[:split_kwargs_index], all_args[split_kwargs_index:]\n kwargs = dict(tuple(zip(kwargs_keys, kwargs_values)))\n\n out = fn(model, *args, **kwargs)\n return out\n return inner\n\n# gradient accumulation functions\n\ndef split_iterable(it, split_size):\n accum = []\n for ind in range(ceil(len(it) / split_size)):\n start_index = ind * split_size\n accum.append(it[start_index: (start_index + split_size)])\n return accum\n\ndef split(t, split_size = None):\n if not exists(split_size):\n return t\n\n if isinstance(t, torch.Tensor):\n return t.split(split_size, dim = 0)\n\n if isinstance(t, Iterable):\n return split_iterable(t, split_size)\n\n return TypeError\n\ndef find_first(cond, arr):\n for el in arr:\n if cond(el):\n return el\n return None\n\ndef split_args_and_kwargs(*args, split_size = None, **kwargs):\n all_args = (*args, *kwargs.values())\n len_all_args = len(all_args)\n first_tensor = find_first(lambda t: isinstance(t, torch.Tensor), all_args)\n assert exists(first_tensor)\n\n batch_size = len(first_tensor)\n split_size = default(split_size, batch_size)\n num_chunks = ceil(batch_size / split_size)\n\n dict_len = len(kwargs)\n dict_keys = kwargs.keys()\n split_kwargs_index = len_all_args - dict_len\n\n split_all_args = [split(arg, split_size = split_size) if exists(arg) and isinstance(arg, (torch.Tensor, Iterable)) else ((arg,) * num_chunks) for arg in all_args]\n chunk_sizes = num_to_groups(batch_size, split_size)\n\n for (chunk_size, *chunked_all_args) in tuple(zip(chunk_sizes, *split_all_args)):\n chunked_args, chunked_kwargs_values = chunked_all_args[:split_kwargs_index], chunked_all_args[split_kwargs_index:]\n 
chunked_kwargs = dict(tuple(zip(dict_keys, chunked_kwargs_values)))\n chunk_size_frac = chunk_size / batch_size\n yield chunk_size_frac, (chunked_args, chunked_kwargs)\n\n# imagen trainer\n\ndef imagen_sample_in_chunks(fn):\n @wraps(fn)\n def inner(self, *args, max_batch_size = None, **kwargs):\n if not exists(max_batch_size):\n return fn(self, *args, **kwargs)\n\n if self.imagen.unconditional:\n batch_size = kwargs.get('batch_size')\n batch_sizes = num_to_groups(batch_size, max_batch_size)\n outputs = [fn(self, *args, **{**kwargs, 'batch_size': sub_batch_size}) for sub_batch_size in batch_sizes]\n else:\n outputs = [fn(self, *chunked_args, **chunked_kwargs) for _, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs)]\n\n if isinstance(outputs[0], torch.Tensor):\n return torch.cat(outputs, dim = 0)\n\n return list(map(lambda t: torch.cat(t, dim = 0), list(zip(*outputs))))\n\n return inner\n\n\ndef restore_parts(state_dict_target, state_dict_from):\n for name, param in state_dict_from.items():\n\n if name not in state_dict_target:\n continue\n\n if param.size() == state_dict_target[name].size():\n state_dict_target[name].copy_(param)\n else:\n print(f\"layer {name}({param.size()} different than target: {state_dict_target[name].size()}\")\n\n return state_dict_target\n\n\nclass ImagenTrainer(nn.Module):\n locked = False\n\n def __init__(\n self,\n imagen = None,\n imagen_checkpoint_path = None,\n use_ema = True,\n lr = 1e-4,\n eps = 1e-8,\n beta1 = 0.9,\n beta2 = 0.99,\n max_grad_norm = None,\n group_wd_params = True,\n warmup_steps = None,\n cosine_decay_max_steps = None,\n only_train_unet_number = None,\n fp16 = False,\n precision = None,\n split_batches = True,\n dl_tuple_output_keywords_names = ('images', 'text_embeds', 'text_masks', 'cond_images'),\n verbose = True,\n split_valid_fraction = 0.025,\n split_valid_from_train = False,\n split_random_seed = 42,\n checkpoint_path = None,\n checkpoint_every = None,\n checkpoint_fs = None,\n fs_kwargs: dict = None,\n max_checkpoints_keep = 20,\n use_lion = False,\n **kwargs\n ):\n super().__init__()\n assert not ImagenTrainer.locked, 'ImagenTrainer can only be initialized once per process - for the sake of distributed training, you will now have to create a separate script to train each unet (or a script that accepts unet number as an argument)'\n assert exists(imagen) ^ exists(imagen_checkpoint_path), 'either imagen instance is passed into the trainer, or a checkpoint path that contains the imagen config'\n\n # determine filesystem, using fsspec, for saving to local filesystem or cloud\n\n self.fs = checkpoint_fs\n\n if not exists(self.fs):\n fs_kwargs = default(fs_kwargs, {})\n self.fs, _ = url_to_fs(default(checkpoint_path, './'), **fs_kwargs)\n\n assert isinstance(imagen, (Imagen, ElucidatedImagen))\n ema_kwargs, kwargs = groupby_prefix_and_trim('ema_', kwargs)\n\n # elucidated or not\n\n self.is_elucidated = isinstance(imagen, ElucidatedImagen)\n\n # create accelerator instance\n\n accelerate_kwargs, kwargs = groupby_prefix_and_trim('accelerate_', kwargs)\n\n assert not (fp16 and exists(precision)), 'either set fp16 = True or forward the precision (\"fp16\", \"bf16\") to Accelerator'\n accelerator_mixed_precision = default(precision, 'fp16' if fp16 else 'no')\n\n self.accelerator = Accelerator(**{\n 'split_batches': split_batches,\n 'mixed_precision': accelerator_mixed_precision,\n 'kwargs_handlers': [DistributedDataParallelKwargs(find_unused_parameters = True)]\n , **accelerate_kwargs})\n\n 
ImagenTrainer.locked = self.is_distributed\n\n # cast data to fp16 at training time if needed\n\n self.cast_half_at_training = accelerator_mixed_precision == 'fp16'\n\n # grad scaler must be managed outside of accelerator\n\n grad_scaler_enabled = fp16\n\n # imagen, unets and ema unets\n\n self.imagen = imagen\n self.num_unets = len(self.imagen.unets)\n\n self.use_ema = use_ema and self.is_main\n self.ema_unets = nn.ModuleList([])\n\n # keep track of what unet is being trained on\n # only going to allow 1 unet training at a time\n\n self.ema_unet_being_trained_index = -1 # keeps track of which ema unet is being trained on\n\n # data related functions\n\n self.train_dl_iter = None\n self.train_dl = None\n\n self.valid_dl_iter = None\n self.valid_dl = None\n\n self.dl_tuple_output_keywords_names = dl_tuple_output_keywords_names\n\n # auto splitting validation from training, if dataset is passed in\n\n self.split_valid_from_train = split_valid_from_train\n\n assert 0 <= split_valid_fraction <= 1, 'split valid fraction must be between 0 and 1'\n self.split_valid_fraction = split_valid_fraction\n self.split_random_seed = split_random_seed\n\n # be able to finely customize learning rate, weight decay\n # per unet\n\n lr, eps, warmup_steps, cosine_decay_max_steps = map(partial(cast_tuple, length = self.num_unets), (lr, eps, warmup_steps, cosine_decay_max_steps))\n\n for ind, (unet, unet_lr, unet_eps, unet_warmup_steps, unet_cosine_decay_max_steps) in enumerate(zip(self.imagen.unets, lr, eps, warmup_steps, cosine_decay_max_steps)):\n\n if use_lion:\n optimizer = Lion(\n unet.parameters(),\n lr = unet_lr,\n betas = (beta1, beta2),\n use_triton = True\n )\n else:\n optimizer = Adam(\n unet.parameters(),\n lr = unet_lr,\n eps = unet_eps,\n betas = (beta1, beta2),\n **kwargs\n )\n\n if self.use_ema:\n self.ema_unets.append(EMA(unet, **ema_kwargs))\n\n scaler = GradScaler(enabled = grad_scaler_enabled)\n\n scheduler = warmup_scheduler = None\n\n if exists(unet_cosine_decay_max_steps):\n scheduler = CosineAnnealingLR(optimizer, T_max = unet_cosine_decay_max_steps)\n\n if exists(unet_warmup_steps):\n warmup_scheduler = warmup.LinearWarmup(optimizer, warmup_period = unet_warmup_steps)\n\n if not exists(scheduler):\n scheduler = LambdaLR(optimizer, lr_lambda = lambda step: 1.0)\n\n # set on object\n\n setattr(self, f'optim{ind}', optimizer) # cannot use pytorch ModuleList for some reason with optimizers\n setattr(self, f'scaler{ind}', scaler)\n setattr(self, f'scheduler{ind}', scheduler)\n setattr(self, f'warmup{ind}', warmup_scheduler)\n\n # gradient clipping if needed\n\n self.max_grad_norm = max_grad_norm\n\n # step tracker and misc\n\n self.register_buffer('steps', torch.tensor([0] * self.num_unets))\n\n self.verbose = verbose\n\n # automatic set devices based on what accelerator decided\n\n self.imagen.to(self.device)\n self.to(self.device)\n\n # checkpointing\n\n assert not (exists(checkpoint_path) ^ exists(checkpoint_every))\n self.checkpoint_path = checkpoint_path\n self.checkpoint_every = checkpoint_every\n self.max_checkpoints_keep = max_checkpoints_keep\n\n self.can_checkpoint = self.is_local_main if isinstance(checkpoint_fs, LocalFileSystem) else self.is_main\n\n if exists(checkpoint_path) and self.can_checkpoint:\n bucket = url_to_bucket(checkpoint_path)\n\n if not self.fs.exists(bucket):\n self.fs.mkdir(bucket)\n\n self.load_from_checkpoint_folder()\n\n # only allowing training for unet\n\n self.only_train_unet_number = only_train_unet_number\n self.prepared = False\n\n\n def prepare(self):\n 
assert not self.prepared, f'The trainer is allready prepared'\n self.validate_and_set_unet_being_trained(self.only_train_unet_number)\n self.prepared = True\n # computed values\n\n @property\n def device(self):\n return self.accelerator.device\n\n @property\n def is_distributed(self):\n return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)\n\n @property\n def is_main(self):\n return self.accelerator.is_main_process\n\n @property\n def is_local_main(self):\n return self.accelerator.is_local_main_process\n\n @property\n def unwrapped_unet(self):\n return self.accelerator.unwrap_model(self.unet_being_trained)\n\n # optimizer helper functions\n\n def get_lr(self, unet_number):\n self.validate_unet_number(unet_number)\n unet_index = unet_number - 1\n\n optim = getattr(self, f'optim{unet_index}')\n\n return optim.param_groups[0]['lr']\n\n # function for allowing only one unet from being trained at a time\n\n def validate_and_set_unet_being_trained(self, unet_number = None):\n if exists(unet_number):\n self.validate_unet_number(unet_number)\n\n assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, 'you cannot only train on one unet at a time. you will need to save the trainer into a checkpoint, and resume training on a new unet'\n\n self.only_train_unet_number = unet_number\n self.imagen.only_train_unet_number = unet_number\n\n if not exists(unet_number):\n return\n\n self.wrap_unet(unet_number)\n\n def wrap_unet(self, unet_number):", "metadata": {"task_id": "lucidrains--imagen-pytorch/56", "ground_truth": " if hasattr(self, 'one_unet_wrapped'):\n return\n\n unet = self.imagen.get_unet(unet_number)\n unet_index = unet_number - 1\n\n optimizer = getattr(self, f'optim{unet_index}')\n scheduler = getattr(self, f'scheduler{unet_index}')\n\n if self.train_dl:\n self.unet_being_trained, self.train_dl, optimizer = self.accelerator.prepare(unet, self.train_dl, optimizer)\n else:\n self.unet_being_trained, optimizer = self.accelerator.prepare(unet, optimizer)\n\n if exists(scheduler):\n scheduler = self.accelerator.prepare(scheduler)\n\n setattr(self, f'optim{unet_index}', optimizer)\n setattr(self, f'scheduler{unet_index}', scheduler)\n\n self.one_unet_wrapped = True\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 106, "lineno": 469, "function_name": "wrap_unet"}, "groundtruth": " if hasattr(self, 'one_unet_wrapped'):\n return\n\n unet = self.imagen.get_unet(unet_number)\n unet_index = unet_number - 1\n\n optimizer = getattr(self, f'optim{unet_index}')\n scheduler = getattr(self, f'scheduler{unet_index}')\n\n if self.train_dl:\n self.unet_being_trained, self.train_dl, optimizer = self.accelerator.prepare(unet, self.train_dl, optimizer)\n else:\n self.unet_being_trained, optimizer = self.accelerator.prepare(unet, optimizer)\n\n if exists(scheduler):\n scheduler = self.accelerator.prepare(scheduler)\n\n setattr(self, f'optim{unet_index}', optimizer)\n setattr(self, f'scheduler{unet_index}', scheduler)\n\n self.one_unet_wrapped = True\n"} +{"prompt": " if should_cast_fp16:\n all_args = tuple(map(lambda t: t.half() if exists(t) and isinstance(t, torch.Tensor) and t.dtype != torch.bool else t, all_args))\n\n args, kwargs_values = all_args[:split_kwargs_index], all_args[split_kwargs_index:]\n kwargs = dict(tuple(zip(kwargs_keys, kwargs_values)))\n\n out = fn(model, *args, **kwargs)\n return out\n return inner\n\n# gradient accumulation functions\n\ndef 
split_iterable(it, split_size):\n accum = []\n for ind in range(ceil(len(it) / split_size)):\n start_index = ind * split_size\n accum.append(it[start_index: (start_index + split_size)])\n return accum\n\ndef split(t, split_size = None):\n if not exists(split_size):\n return t\n\n if isinstance(t, torch.Tensor):\n return t.split(split_size, dim = 0)\n\n if isinstance(t, Iterable):\n return split_iterable(t, split_size)\n\n return TypeError\n\ndef find_first(cond, arr):\n for el in arr:\n if cond(el):\n return el\n return None\n\ndef split_args_and_kwargs(*args, split_size = None, **kwargs):\n all_args = (*args, *kwargs.values())\n len_all_args = len(all_args)\n first_tensor = find_first(lambda t: isinstance(t, torch.Tensor), all_args)\n assert exists(first_tensor)\n\n batch_size = len(first_tensor)\n split_size = default(split_size, batch_size)\n num_chunks = ceil(batch_size / split_size)\n\n dict_len = len(kwargs)\n dict_keys = kwargs.keys()\n split_kwargs_index = len_all_args - dict_len\n\n split_all_args = [split(arg, split_size = split_size) if exists(arg) and isinstance(arg, (torch.Tensor, Iterable)) else ((arg,) * num_chunks) for arg in all_args]\n chunk_sizes = num_to_groups(batch_size, split_size)\n\n for (chunk_size, *chunked_all_args) in tuple(zip(chunk_sizes, *split_all_args)):\n chunked_args, chunked_kwargs_values = chunked_all_args[:split_kwargs_index], chunked_all_args[split_kwargs_index:]\n chunked_kwargs = dict(tuple(zip(dict_keys, chunked_kwargs_values)))\n chunk_size_frac = chunk_size / batch_size\n yield chunk_size_frac, (chunked_args, chunked_kwargs)\n\n# imagen trainer\n\ndef imagen_sample_in_chunks(fn):\n @wraps(fn)\n def inner(self, *args, max_batch_size = None, **kwargs):\n if not exists(max_batch_size):\n return fn(self, *args, **kwargs)\n\n if self.imagen.unconditional:\n batch_size = kwargs.get('batch_size')\n batch_sizes = num_to_groups(batch_size, max_batch_size)\n outputs = [fn(self, *args, **{**kwargs, 'batch_size': sub_batch_size}) for sub_batch_size in batch_sizes]\n else:\n outputs = [fn(self, *chunked_args, **chunked_kwargs) for _, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs)]\n\n if isinstance(outputs[0], torch.Tensor):\n return torch.cat(outputs, dim = 0)\n\n return list(map(lambda t: torch.cat(t, dim = 0), list(zip(*outputs))))\n\n return inner\n\n\ndef restore_parts(state_dict_target, state_dict_from):\n for name, param in state_dict_from.items():\n\n if name not in state_dict_target:\n continue\n\n if param.size() == state_dict_target[name].size():\n state_dict_target[name].copy_(param)\n else:\n print(f\"layer {name}({param.size()} different than target: {state_dict_target[name].size()}\")\n\n return state_dict_target\n\n\nclass ImagenTrainer(nn.Module):\n locked = False\n\n def __init__(\n self,\n imagen = None,\n imagen_checkpoint_path = None,\n use_ema = True,\n lr = 1e-4,\n eps = 1e-8,\n beta1 = 0.9,\n beta2 = 0.99,\n max_grad_norm = None,\n group_wd_params = True,\n warmup_steps = None,\n cosine_decay_max_steps = None,\n only_train_unet_number = None,\n fp16 = False,\n precision = None,\n split_batches = True,\n dl_tuple_output_keywords_names = ('images', 'text_embeds', 'text_masks', 'cond_images'),\n verbose = True,\n split_valid_fraction = 0.025,\n split_valid_from_train = False,\n split_random_seed = 42,\n checkpoint_path = None,\n checkpoint_every = None,\n checkpoint_fs = None,\n fs_kwargs: dict = None,\n max_checkpoints_keep = 20,\n use_lion = False,\n **kwargs\n ):\n 
super().__init__()\n assert not ImagenTrainer.locked, 'ImagenTrainer can only be initialized once per process - for the sake of distributed training, you will now have to create a separate script to train each unet (or a script that accepts unet number as an argument)'\n assert exists(imagen) ^ exists(imagen_checkpoint_path), 'either imagen instance is passed into the trainer, or a checkpoint path that contains the imagen config'\n\n # determine filesystem, using fsspec, for saving to local filesystem or cloud\n\n self.fs = checkpoint_fs\n\n if not exists(self.fs):\n fs_kwargs = default(fs_kwargs, {})\n self.fs, _ = url_to_fs(default(checkpoint_path, './'), **fs_kwargs)\n\n assert isinstance(imagen, (Imagen, ElucidatedImagen))\n ema_kwargs, kwargs = groupby_prefix_and_trim('ema_', kwargs)\n\n # elucidated or not\n\n self.is_elucidated = isinstance(imagen, ElucidatedImagen)\n\n # create accelerator instance\n\n accelerate_kwargs, kwargs = groupby_prefix_and_trim('accelerate_', kwargs)\n\n assert not (fp16 and exists(precision)), 'either set fp16 = True or forward the precision (\"fp16\", \"bf16\") to Accelerator'\n accelerator_mixed_precision = default(precision, 'fp16' if fp16 else 'no')\n\n self.accelerator = Accelerator(**{\n 'split_batches': split_batches,\n 'mixed_precision': accelerator_mixed_precision,\n 'kwargs_handlers': [DistributedDataParallelKwargs(find_unused_parameters = True)]\n , **accelerate_kwargs})\n\n ImagenTrainer.locked = self.is_distributed\n\n # cast data to fp16 at training time if needed\n\n self.cast_half_at_training = accelerator_mixed_precision == 'fp16'\n\n # grad scaler must be managed outside of accelerator\n\n grad_scaler_enabled = fp16\n\n # imagen, unets and ema unets\n\n self.imagen = imagen\n self.num_unets = len(self.imagen.unets)\n\n self.use_ema = use_ema and self.is_main\n self.ema_unets = nn.ModuleList([])\n\n # keep track of what unet is being trained on\n # only going to allow 1 unet training at a time\n\n self.ema_unet_being_trained_index = -1 # keeps track of which ema unet is being trained on\n\n # data related functions\n\n self.train_dl_iter = None\n self.train_dl = None\n\n self.valid_dl_iter = None\n self.valid_dl = None\n\n self.dl_tuple_output_keywords_names = dl_tuple_output_keywords_names\n\n # auto splitting validation from training, if dataset is passed in\n\n self.split_valid_from_train = split_valid_from_train\n\n assert 0 <= split_valid_fraction <= 1, 'split valid fraction must be between 0 and 1'\n self.split_valid_fraction = split_valid_fraction\n self.split_random_seed = split_random_seed\n\n # be able to finely customize learning rate, weight decay\n # per unet\n\n lr, eps, warmup_steps, cosine_decay_max_steps = map(partial(cast_tuple, length = self.num_unets), (lr, eps, warmup_steps, cosine_decay_max_steps))\n\n for ind, (unet, unet_lr, unet_eps, unet_warmup_steps, unet_cosine_decay_max_steps) in enumerate(zip(self.imagen.unets, lr, eps, warmup_steps, cosine_decay_max_steps)):\n\n if use_lion:\n optimizer = Lion(\n unet.parameters(),\n lr = unet_lr,\n betas = (beta1, beta2),\n use_triton = True\n )\n else:\n optimizer = Adam(\n unet.parameters(),\n lr = unet_lr,\n eps = unet_eps,\n betas = (beta1, beta2),\n **kwargs\n )\n\n if self.use_ema:\n self.ema_unets.append(EMA(unet, **ema_kwargs))\n\n scaler = GradScaler(enabled = grad_scaler_enabled)\n\n scheduler = warmup_scheduler = None\n\n if exists(unet_cosine_decay_max_steps):\n scheduler = CosineAnnealingLR(optimizer, T_max = unet_cosine_decay_max_steps)\n\n if 
exists(unet_warmup_steps):\n warmup_scheduler = warmup.LinearWarmup(optimizer, warmup_period = unet_warmup_steps)\n\n if not exists(scheduler):\n scheduler = LambdaLR(optimizer, lr_lambda = lambda step: 1.0)\n\n # set on object\n\n setattr(self, f'optim{ind}', optimizer) # cannot use pytorch ModuleList for some reason with optimizers\n setattr(self, f'scaler{ind}', scaler)\n setattr(self, f'scheduler{ind}', scheduler)\n setattr(self, f'warmup{ind}', warmup_scheduler)\n\n # gradient clipping if needed\n\n self.max_grad_norm = max_grad_norm\n\n # step tracker and misc\n\n self.register_buffer('steps', torch.tensor([0] * self.num_unets))\n\n self.verbose = verbose\n\n # automatic set devices based on what accelerator decided\n\n self.imagen.to(self.device)\n self.to(self.device)\n\n # checkpointing\n\n assert not (exists(checkpoint_path) ^ exists(checkpoint_every))\n self.checkpoint_path = checkpoint_path\n self.checkpoint_every = checkpoint_every\n self.max_checkpoints_keep = max_checkpoints_keep\n\n self.can_checkpoint = self.is_local_main if isinstance(checkpoint_fs, LocalFileSystem) else self.is_main\n\n if exists(checkpoint_path) and self.can_checkpoint:\n bucket = url_to_bucket(checkpoint_path)\n\n if not self.fs.exists(bucket):\n self.fs.mkdir(bucket)\n\n self.load_from_checkpoint_folder()\n\n # only allowing training for unet\n\n self.only_train_unet_number = only_train_unet_number\n self.prepared = False\n\n\n def prepare(self):\n assert not self.prepared, f'The trainer is allready prepared'\n self.validate_and_set_unet_being_trained(self.only_train_unet_number)\n self.prepared = True\n # computed values\n\n @property\n def device(self):\n return self.accelerator.device\n\n @property\n def is_distributed(self):\n return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)\n\n @property\n def is_main(self):\n return self.accelerator.is_main_process\n\n @property\n def is_local_main(self):\n return self.accelerator.is_local_main_process\n\n @property\n def unwrapped_unet(self):\n return self.accelerator.unwrap_model(self.unet_being_trained)\n\n # optimizer helper functions\n\n def get_lr(self, unet_number):\n self.validate_unet_number(unet_number)\n unet_index = unet_number - 1\n\n optim = getattr(self, f'optim{unet_index}')\n\n return optim.param_groups[0]['lr']\n\n # function for allowing only one unet from being trained at a time\n\n def validate_and_set_unet_being_trained(self, unet_number = None):\n if exists(unet_number):\n self.validate_unet_number(unet_number)\n\n assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, 'you cannot only train on one unet at a time. 
you will need to save the trainer into a checkpoint, and resume training on a new unet'\n\n self.only_train_unet_number = unet_number\n self.imagen.only_train_unet_number = unet_number\n\n if not exists(unet_number):\n return\n\n self.wrap_unet(unet_number)\n\n def wrap_unet(self, unet_number):\n if hasattr(self, 'one_unet_wrapped'):\n return\n\n unet = self.imagen.get_unet(unet_number)\n unet_index = unet_number - 1\n\n optimizer = getattr(self, f'optim{unet_index}')\n scheduler = getattr(self, f'scheduler{unet_index}')\n\n if self.train_dl:\n self.unet_being_trained, self.train_dl, optimizer = self.accelerator.prepare(unet, self.train_dl, optimizer)\n else:\n self.unet_being_trained, optimizer = self.accelerator.prepare(unet, optimizer)\n\n if exists(scheduler):\n scheduler = self.accelerator.prepare(scheduler)\n\n setattr(self, f'optim{unet_index}', optimizer)\n setattr(self, f'scheduler{unet_index}', scheduler)\n\n self.one_unet_wrapped = True\n\n # hacking accelerator due to not having separate gradscaler per optimizer\n\n def set_accelerator_scaler(self, unet_number):", "metadata": {"task_id": "lucidrains--imagen-pytorch/57", "ground_truth": " unet_number = self.validate_unet_number(unet_number)\n scaler = getattr(self, f'scaler{unet_number - 1}')\n\n self.accelerator.scaler = scaler\n for optimizer in self.accelerator._optimizers:\n optimizer.scaler = scaler\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 129, "lineno": 494, "function_name": "set_accelerator_scaler"}, "groundtruth": " unet_number = self.validate_unet_number(unet_number)\n scaler = getattr(self, f'scaler{unet_number - 1}')\n\n self.accelerator.scaler = scaler\n for optimizer in self.accelerator._optimizers:\n optimizer.scaler = scaler\n"} +{"prompt": "\n for ind in range(ceil(len(it) / split_size)):\n start_index = ind * split_size\n accum.append(it[start_index: (start_index + split_size)])\n return accum\n\ndef split(t, split_size = None):\n if not exists(split_size):\n return t\n\n if isinstance(t, torch.Tensor):\n return t.split(split_size, dim = 0)\n\n if isinstance(t, Iterable):\n return split_iterable(t, split_size)\n\n return TypeError\n\ndef find_first(cond, arr):\n for el in arr:\n if cond(el):\n return el\n return None\n\ndef split_args_and_kwargs(*args, split_size = None, **kwargs):\n all_args = (*args, *kwargs.values())\n len_all_args = len(all_args)\n first_tensor = find_first(lambda t: isinstance(t, torch.Tensor), all_args)\n assert exists(first_tensor)\n\n batch_size = len(first_tensor)\n split_size = default(split_size, batch_size)\n num_chunks = ceil(batch_size / split_size)\n\n dict_len = len(kwargs)\n dict_keys = kwargs.keys()\n split_kwargs_index = len_all_args - dict_len\n\n split_all_args = [split(arg, split_size = split_size) if exists(arg) and isinstance(arg, (torch.Tensor, Iterable)) else ((arg,) * num_chunks) for arg in all_args]\n chunk_sizes = num_to_groups(batch_size, split_size)\n\n for (chunk_size, *chunked_all_args) in tuple(zip(chunk_sizes, *split_all_args)):\n chunked_args, chunked_kwargs_values = chunked_all_args[:split_kwargs_index], chunked_all_args[split_kwargs_index:]\n chunked_kwargs = dict(tuple(zip(dict_keys, chunked_kwargs_values)))\n chunk_size_frac = chunk_size / batch_size\n yield chunk_size_frac, (chunked_args, chunked_kwargs)\n\n# imagen trainer\n\ndef imagen_sample_in_chunks(fn):\n @wraps(fn)\n def inner(self, *args, max_batch_size = None, **kwargs):\n if not exists(max_batch_size):\n return fn(self, *args, 
**kwargs)\n\n if self.imagen.unconditional:\n batch_size = kwargs.get('batch_size')\n batch_sizes = num_to_groups(batch_size, max_batch_size)\n outputs = [fn(self, *args, **{**kwargs, 'batch_size': sub_batch_size}) for sub_batch_size in batch_sizes]\n else:\n outputs = [fn(self, *chunked_args, **chunked_kwargs) for _, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs)]\n\n if isinstance(outputs[0], torch.Tensor):\n return torch.cat(outputs, dim = 0)\n\n return list(map(lambda t: torch.cat(t, dim = 0), list(zip(*outputs))))\n\n return inner\n\n\ndef restore_parts(state_dict_target, state_dict_from):\n for name, param in state_dict_from.items():\n\n if name not in state_dict_target:\n continue\n\n if param.size() == state_dict_target[name].size():\n state_dict_target[name].copy_(param)\n else:\n print(f\"layer {name}({param.size()} different than target: {state_dict_target[name].size()}\")\n\n return state_dict_target\n\n\nclass ImagenTrainer(nn.Module):\n locked = False\n\n def __init__(\n self,\n imagen = None,\n imagen_checkpoint_path = None,\n use_ema = True,\n lr = 1e-4,\n eps = 1e-8,\n beta1 = 0.9,\n beta2 = 0.99,\n max_grad_norm = None,\n group_wd_params = True,\n warmup_steps = None,\n cosine_decay_max_steps = None,\n only_train_unet_number = None,\n fp16 = False,\n precision = None,\n split_batches = True,\n dl_tuple_output_keywords_names = ('images', 'text_embeds', 'text_masks', 'cond_images'),\n verbose = True,\n split_valid_fraction = 0.025,\n split_valid_from_train = False,\n split_random_seed = 42,\n checkpoint_path = None,\n checkpoint_every = None,\n checkpoint_fs = None,\n fs_kwargs: dict = None,\n max_checkpoints_keep = 20,\n use_lion = False,\n **kwargs\n ):\n super().__init__()\n assert not ImagenTrainer.locked, 'ImagenTrainer can only be initialized once per process - for the sake of distributed training, you will now have to create a separate script to train each unet (or a script that accepts unet number as an argument)'\n assert exists(imagen) ^ exists(imagen_checkpoint_path), 'either imagen instance is passed into the trainer, or a checkpoint path that contains the imagen config'\n\n # determine filesystem, using fsspec, for saving to local filesystem or cloud\n\n self.fs = checkpoint_fs\n\n if not exists(self.fs):\n fs_kwargs = default(fs_kwargs, {})\n self.fs, _ = url_to_fs(default(checkpoint_path, './'), **fs_kwargs)\n\n assert isinstance(imagen, (Imagen, ElucidatedImagen))\n ema_kwargs, kwargs = groupby_prefix_and_trim('ema_', kwargs)\n\n # elucidated or not\n\n self.is_elucidated = isinstance(imagen, ElucidatedImagen)\n\n # create accelerator instance\n\n accelerate_kwargs, kwargs = groupby_prefix_and_trim('accelerate_', kwargs)\n\n assert not (fp16 and exists(precision)), 'either set fp16 = True or forward the precision (\"fp16\", \"bf16\") to Accelerator'\n accelerator_mixed_precision = default(precision, 'fp16' if fp16 else 'no')\n\n self.accelerator = Accelerator(**{\n 'split_batches': split_batches,\n 'mixed_precision': accelerator_mixed_precision,\n 'kwargs_handlers': [DistributedDataParallelKwargs(find_unused_parameters = True)]\n , **accelerate_kwargs})\n\n ImagenTrainer.locked = self.is_distributed\n\n # cast data to fp16 at training time if needed\n\n self.cast_half_at_training = accelerator_mixed_precision == 'fp16'\n\n # grad scaler must be managed outside of accelerator\n\n grad_scaler_enabled = fp16\n\n # imagen, unets and ema unets\n\n self.imagen = imagen\n self.num_unets = 
len(self.imagen.unets)\n\n self.use_ema = use_ema and self.is_main\n self.ema_unets = nn.ModuleList([])\n\n # keep track of what unet is being trained on\n # only going to allow 1 unet training at a time\n\n self.ema_unet_being_trained_index = -1 # keeps track of which ema unet is being trained on\n\n # data related functions\n\n self.train_dl_iter = None\n self.train_dl = None\n\n self.valid_dl_iter = None\n self.valid_dl = None\n\n self.dl_tuple_output_keywords_names = dl_tuple_output_keywords_names\n\n # auto splitting validation from training, if dataset is passed in\n\n self.split_valid_from_train = split_valid_from_train\n\n assert 0 <= split_valid_fraction <= 1, 'split valid fraction must be between 0 and 1'\n self.split_valid_fraction = split_valid_fraction\n self.split_random_seed = split_random_seed\n\n # be able to finely customize learning rate, weight decay\n # per unet\n\n lr, eps, warmup_steps, cosine_decay_max_steps = map(partial(cast_tuple, length = self.num_unets), (lr, eps, warmup_steps, cosine_decay_max_steps))\n\n for ind, (unet, unet_lr, unet_eps, unet_warmup_steps, unet_cosine_decay_max_steps) in enumerate(zip(self.imagen.unets, lr, eps, warmup_steps, cosine_decay_max_steps)):\n\n if use_lion:\n optimizer = Lion(\n unet.parameters(),\n lr = unet_lr,\n betas = (beta1, beta2),\n use_triton = True\n )\n else:\n optimizer = Adam(\n unet.parameters(),\n lr = unet_lr,\n eps = unet_eps,\n betas = (beta1, beta2),\n **kwargs\n )\n\n if self.use_ema:\n self.ema_unets.append(EMA(unet, **ema_kwargs))\n\n scaler = GradScaler(enabled = grad_scaler_enabled)\n\n scheduler = warmup_scheduler = None\n\n if exists(unet_cosine_decay_max_steps):\n scheduler = CosineAnnealingLR(optimizer, T_max = unet_cosine_decay_max_steps)\n\n if exists(unet_warmup_steps):\n warmup_scheduler = warmup.LinearWarmup(optimizer, warmup_period = unet_warmup_steps)\n\n if not exists(scheduler):\n scheduler = LambdaLR(optimizer, lr_lambda = lambda step: 1.0)\n\n # set on object\n\n setattr(self, f'optim{ind}', optimizer) # cannot use pytorch ModuleList for some reason with optimizers\n setattr(self, f'scaler{ind}', scaler)\n setattr(self, f'scheduler{ind}', scheduler)\n setattr(self, f'warmup{ind}', warmup_scheduler)\n\n # gradient clipping if needed\n\n self.max_grad_norm = max_grad_norm\n\n # step tracker and misc\n\n self.register_buffer('steps', torch.tensor([0] * self.num_unets))\n\n self.verbose = verbose\n\n # automatic set devices based on what accelerator decided\n\n self.imagen.to(self.device)\n self.to(self.device)\n\n # checkpointing\n\n assert not (exists(checkpoint_path) ^ exists(checkpoint_every))\n self.checkpoint_path = checkpoint_path\n self.checkpoint_every = checkpoint_every\n self.max_checkpoints_keep = max_checkpoints_keep\n\n self.can_checkpoint = self.is_local_main if isinstance(checkpoint_fs, LocalFileSystem) else self.is_main\n\n if exists(checkpoint_path) and self.can_checkpoint:\n bucket = url_to_bucket(checkpoint_path)\n\n if not self.fs.exists(bucket):\n self.fs.mkdir(bucket)\n\n self.load_from_checkpoint_folder()\n\n # only allowing training for unet\n\n self.only_train_unet_number = only_train_unet_number\n self.prepared = False\n\n\n def prepare(self):\n assert not self.prepared, f'The trainer is allready prepared'\n self.validate_and_set_unet_being_trained(self.only_train_unet_number)\n self.prepared = True\n # computed values\n\n @property\n def device(self):\n return self.accelerator.device\n\n @property\n def is_distributed(self):\n return not 
(self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)\n\n @property\n def is_main(self):\n return self.accelerator.is_main_process\n\n @property\n def is_local_main(self):\n return self.accelerator.is_local_main_process\n\n @property\n def unwrapped_unet(self):\n return self.accelerator.unwrap_model(self.unet_being_trained)\n\n # optimizer helper functions\n\n def get_lr(self, unet_number):\n self.validate_unet_number(unet_number)\n unet_index = unet_number - 1\n\n optim = getattr(self, f'optim{unet_index}')\n\n return optim.param_groups[0]['lr']\n\n # function for allowing only one unet from being trained at a time\n\n def validate_and_set_unet_being_trained(self, unet_number = None):\n if exists(unet_number):\n self.validate_unet_number(unet_number)\n\n assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, 'you cannot only train on one unet at a time. you will need to save the trainer into a checkpoint, and resume training on a new unet'\n\n self.only_train_unet_number = unet_number\n self.imagen.only_train_unet_number = unet_number\n\n if not exists(unet_number):\n return\n\n self.wrap_unet(unet_number)\n\n def wrap_unet(self, unet_number):\n if hasattr(self, 'one_unet_wrapped'):\n return\n\n unet = self.imagen.get_unet(unet_number)\n unet_index = unet_number - 1\n\n optimizer = getattr(self, f'optim{unet_index}')\n scheduler = getattr(self, f'scheduler{unet_index}')\n\n if self.train_dl:\n self.unet_being_trained, self.train_dl, optimizer = self.accelerator.prepare(unet, self.train_dl, optimizer)\n else:\n self.unet_being_trained, optimizer = self.accelerator.prepare(unet, optimizer)\n\n if exists(scheduler):\n scheduler = self.accelerator.prepare(scheduler)\n\n setattr(self, f'optim{unet_index}', optimizer)\n setattr(self, f'scheduler{unet_index}', scheduler)\n\n self.one_unet_wrapped = True\n\n # hacking accelerator due to not having separate gradscaler per optimizer\n\n def set_accelerator_scaler(self, unet_number):\n unet_number = self.validate_unet_number(unet_number)\n scaler = getattr(self, f'scaler{unet_number - 1}')\n\n self.accelerator.scaler = scaler\n for optimizer in self.accelerator._optimizers:\n optimizer.scaler = scaler\n\n # helper print\n\n def print(self, msg):\n if not self.is_main:\n return\n\n if not self.verbose:\n return\n\n return self.accelerator.print(msg)\n\n # validating the unet number\n\n def validate_unet_number(self, unet_number = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/58", "ground_truth": " if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n assert 0 < unet_number <= self.num_unets, f'unet number should be in between 1 and {self.num_unets}'\n return unet_number\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 142, "lineno": 515, "function_name": "validate_unet_number"}, "groundtruth": " if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n assert 0 < unet_number <= self.num_unets, f'unet number should be in between 1 and {self.num_unets}'\n return unet_number\n"} +{"prompt": " return t.split(split_size, dim = 0)\n\n if isinstance(t, Iterable):\n return split_iterable(t, split_size)\n\n return TypeError\n\ndef find_first(cond, arr):\n for el in arr:\n if cond(el):\n return el\n return None\n\ndef split_args_and_kwargs(*args, split_size = None, **kwargs):\n all_args = (*args, *kwargs.values())\n len_all_args = len(all_args)\n first_tensor = find_first(lambda t: 
isinstance(t, torch.Tensor), all_args)\n assert exists(first_tensor)\n\n batch_size = len(first_tensor)\n split_size = default(split_size, batch_size)\n num_chunks = ceil(batch_size / split_size)\n\n dict_len = len(kwargs)\n dict_keys = kwargs.keys()\n split_kwargs_index = len_all_args - dict_len\n\n split_all_args = [split(arg, split_size = split_size) if exists(arg) and isinstance(arg, (torch.Tensor, Iterable)) else ((arg,) * num_chunks) for arg in all_args]\n chunk_sizes = num_to_groups(batch_size, split_size)\n\n for (chunk_size, *chunked_all_args) in tuple(zip(chunk_sizes, *split_all_args)):\n chunked_args, chunked_kwargs_values = chunked_all_args[:split_kwargs_index], chunked_all_args[split_kwargs_index:]\n chunked_kwargs = dict(tuple(zip(dict_keys, chunked_kwargs_values)))\n chunk_size_frac = chunk_size / batch_size\n yield chunk_size_frac, (chunked_args, chunked_kwargs)\n\n# imagen trainer\n\ndef imagen_sample_in_chunks(fn):\n @wraps(fn)\n def inner(self, *args, max_batch_size = None, **kwargs):\n if not exists(max_batch_size):\n return fn(self, *args, **kwargs)\n\n if self.imagen.unconditional:\n batch_size = kwargs.get('batch_size')\n batch_sizes = num_to_groups(batch_size, max_batch_size)\n outputs = [fn(self, *args, **{**kwargs, 'batch_size': sub_batch_size}) for sub_batch_size in batch_sizes]\n else:\n outputs = [fn(self, *chunked_args, **chunked_kwargs) for _, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs)]\n\n if isinstance(outputs[0], torch.Tensor):\n return torch.cat(outputs, dim = 0)\n\n return list(map(lambda t: torch.cat(t, dim = 0), list(zip(*outputs))))\n\n return inner\n\n\ndef restore_parts(state_dict_target, state_dict_from):\n for name, param in state_dict_from.items():\n\n if name not in state_dict_target:\n continue\n\n if param.size() == state_dict_target[name].size():\n state_dict_target[name].copy_(param)\n else:\n print(f\"layer {name}({param.size()} different than target: {state_dict_target[name].size()}\")\n\n return state_dict_target\n\n\nclass ImagenTrainer(nn.Module):\n locked = False\n\n def __init__(\n self,\n imagen = None,\n imagen_checkpoint_path = None,\n use_ema = True,\n lr = 1e-4,\n eps = 1e-8,\n beta1 = 0.9,\n beta2 = 0.99,\n max_grad_norm = None,\n group_wd_params = True,\n warmup_steps = None,\n cosine_decay_max_steps = None,\n only_train_unet_number = None,\n fp16 = False,\n precision = None,\n split_batches = True,\n dl_tuple_output_keywords_names = ('images', 'text_embeds', 'text_masks', 'cond_images'),\n verbose = True,\n split_valid_fraction = 0.025,\n split_valid_from_train = False,\n split_random_seed = 42,\n checkpoint_path = None,\n checkpoint_every = None,\n checkpoint_fs = None,\n fs_kwargs: dict = None,\n max_checkpoints_keep = 20,\n use_lion = False,\n **kwargs\n ):\n super().__init__()\n assert not ImagenTrainer.locked, 'ImagenTrainer can only be initialized once per process - for the sake of distributed training, you will now have to create a separate script to train each unet (or a script that accepts unet number as an argument)'\n assert exists(imagen) ^ exists(imagen_checkpoint_path), 'either imagen instance is passed into the trainer, or a checkpoint path that contains the imagen config'\n\n # determine filesystem, using fsspec, for saving to local filesystem or cloud\n\n self.fs = checkpoint_fs\n\n if not exists(self.fs):\n fs_kwargs = default(fs_kwargs, {})\n self.fs, _ = url_to_fs(default(checkpoint_path, './'), **fs_kwargs)\n\n assert isinstance(imagen, (Imagen, 
ElucidatedImagen))\n ema_kwargs, kwargs = groupby_prefix_and_trim('ema_', kwargs)\n\n # elucidated or not\n\n self.is_elucidated = isinstance(imagen, ElucidatedImagen)\n\n # create accelerator instance\n\n accelerate_kwargs, kwargs = groupby_prefix_and_trim('accelerate_', kwargs)\n\n assert not (fp16 and exists(precision)), 'either set fp16 = True or forward the precision (\"fp16\", \"bf16\") to Accelerator'\n accelerator_mixed_precision = default(precision, 'fp16' if fp16 else 'no')\n\n self.accelerator = Accelerator(**{\n 'split_batches': split_batches,\n 'mixed_precision': accelerator_mixed_precision,\n 'kwargs_handlers': [DistributedDataParallelKwargs(find_unused_parameters = True)]\n , **accelerate_kwargs})\n\n ImagenTrainer.locked = self.is_distributed\n\n # cast data to fp16 at training time if needed\n\n self.cast_half_at_training = accelerator_mixed_precision == 'fp16'\n\n # grad scaler must be managed outside of accelerator\n\n grad_scaler_enabled = fp16\n\n # imagen, unets and ema unets\n\n self.imagen = imagen\n self.num_unets = len(self.imagen.unets)\n\n self.use_ema = use_ema and self.is_main\n self.ema_unets = nn.ModuleList([])\n\n # keep track of what unet is being trained on\n # only going to allow 1 unet training at a time\n\n self.ema_unet_being_trained_index = -1 # keeps track of which ema unet is being trained on\n\n # data related functions\n\n self.train_dl_iter = None\n self.train_dl = None\n\n self.valid_dl_iter = None\n self.valid_dl = None\n\n self.dl_tuple_output_keywords_names = dl_tuple_output_keywords_names\n\n # auto splitting validation from training, if dataset is passed in\n\n self.split_valid_from_train = split_valid_from_train\n\n assert 0 <= split_valid_fraction <= 1, 'split valid fraction must be between 0 and 1'\n self.split_valid_fraction = split_valid_fraction\n self.split_random_seed = split_random_seed\n\n # be able to finely customize learning rate, weight decay\n # per unet\n\n lr, eps, warmup_steps, cosine_decay_max_steps = map(partial(cast_tuple, length = self.num_unets), (lr, eps, warmup_steps, cosine_decay_max_steps))\n\n for ind, (unet, unet_lr, unet_eps, unet_warmup_steps, unet_cosine_decay_max_steps) in enumerate(zip(self.imagen.unets, lr, eps, warmup_steps, cosine_decay_max_steps)):\n\n if use_lion:\n optimizer = Lion(\n unet.parameters(),\n lr = unet_lr,\n betas = (beta1, beta2),\n use_triton = True\n )\n else:\n optimizer = Adam(\n unet.parameters(),\n lr = unet_lr,\n eps = unet_eps,\n betas = (beta1, beta2),\n **kwargs\n )\n\n if self.use_ema:\n self.ema_unets.append(EMA(unet, **ema_kwargs))\n\n scaler = GradScaler(enabled = grad_scaler_enabled)\n\n scheduler = warmup_scheduler = None\n\n if exists(unet_cosine_decay_max_steps):\n scheduler = CosineAnnealingLR(optimizer, T_max = unet_cosine_decay_max_steps)\n\n if exists(unet_warmup_steps):\n warmup_scheduler = warmup.LinearWarmup(optimizer, warmup_period = unet_warmup_steps)\n\n if not exists(scheduler):\n scheduler = LambdaLR(optimizer, lr_lambda = lambda step: 1.0)\n\n # set on object\n\n setattr(self, f'optim{ind}', optimizer) # cannot use pytorch ModuleList for some reason with optimizers\n setattr(self, f'scaler{ind}', scaler)\n setattr(self, f'scheduler{ind}', scheduler)\n setattr(self, f'warmup{ind}', warmup_scheduler)\n\n # gradient clipping if needed\n\n self.max_grad_norm = max_grad_norm\n\n # step tracker and misc\n\n self.register_buffer('steps', torch.tensor([0] * self.num_unets))\n\n self.verbose = verbose\n\n # automatic set devices based on what accelerator 
decided\n\n self.imagen.to(self.device)\n self.to(self.device)\n\n # checkpointing\n\n assert not (exists(checkpoint_path) ^ exists(checkpoint_every))\n self.checkpoint_path = checkpoint_path\n self.checkpoint_every = checkpoint_every\n self.max_checkpoints_keep = max_checkpoints_keep\n\n self.can_checkpoint = self.is_local_main if isinstance(checkpoint_fs, LocalFileSystem) else self.is_main\n\n if exists(checkpoint_path) and self.can_checkpoint:\n bucket = url_to_bucket(checkpoint_path)\n\n if not self.fs.exists(bucket):\n self.fs.mkdir(bucket)\n\n self.load_from_checkpoint_folder()\n\n # only allowing training for unet\n\n self.only_train_unet_number = only_train_unet_number\n self.prepared = False\n\n\n def prepare(self):\n assert not self.prepared, f'The trainer is allready prepared'\n self.validate_and_set_unet_being_trained(self.only_train_unet_number)\n self.prepared = True\n # computed values\n\n @property\n def device(self):\n return self.accelerator.device\n\n @property\n def is_distributed(self):\n return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)\n\n @property\n def is_main(self):\n return self.accelerator.is_main_process\n\n @property\n def is_local_main(self):\n return self.accelerator.is_local_main_process\n\n @property\n def unwrapped_unet(self):\n return self.accelerator.unwrap_model(self.unet_being_trained)\n\n # optimizer helper functions\n\n def get_lr(self, unet_number):\n self.validate_unet_number(unet_number)\n unet_index = unet_number - 1\n\n optim = getattr(self, f'optim{unet_index}')\n\n return optim.param_groups[0]['lr']\n\n # function for allowing only one unet from being trained at a time\n\n def validate_and_set_unet_being_trained(self, unet_number = None):\n if exists(unet_number):\n self.validate_unet_number(unet_number)\n\n assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, 'you cannot only train on one unet at a time. 
you will need to save the trainer into a checkpoint, and resume training on a new unet'\n\n self.only_train_unet_number = unet_number\n self.imagen.only_train_unet_number = unet_number\n\n if not exists(unet_number):\n return\n\n self.wrap_unet(unet_number)\n\n def wrap_unet(self, unet_number):\n if hasattr(self, 'one_unet_wrapped'):\n return\n\n unet = self.imagen.get_unet(unet_number)\n unet_index = unet_number - 1\n\n optimizer = getattr(self, f'optim{unet_index}')\n scheduler = getattr(self, f'scheduler{unet_index}')\n\n if self.train_dl:\n self.unet_being_trained, self.train_dl, optimizer = self.accelerator.prepare(unet, self.train_dl, optimizer)\n else:\n self.unet_being_trained, optimizer = self.accelerator.prepare(unet, optimizer)\n\n if exists(scheduler):\n scheduler = self.accelerator.prepare(scheduler)\n\n setattr(self, f'optim{unet_index}', optimizer)\n setattr(self, f'scheduler{unet_index}', scheduler)\n\n self.one_unet_wrapped = True\n\n # hacking accelerator due to not having separate gradscaler per optimizer\n\n def set_accelerator_scaler(self, unet_number):\n unet_number = self.validate_unet_number(unet_number)\n scaler = getattr(self, f'scaler{unet_number - 1}')\n\n self.accelerator.scaler = scaler\n for optimizer in self.accelerator._optimizers:\n optimizer.scaler = scaler\n\n # helper print\n\n def print(self, msg):\n if not self.is_main:\n return\n\n if not self.verbose:\n return\n\n return self.accelerator.print(msg)\n\n # validating the unet number\n\n def validate_unet_number(self, unet_number = None):\n if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n assert 0 < unet_number <= self.num_unets, f'unet number should be in between 1 and {self.num_unets}'\n return unet_number\n\n # number of training steps taken\n\n def num_steps_taken(self, unet_number = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/59", "ground_truth": " if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n return self.steps[unet_number - 1].item()\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 153, "lineno": 524, "function_name": "num_steps_taken"}, "groundtruth": " if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n return self.steps[unet_number - 1].item()\n"} +{"prompt": "args)\n dict_keys = kwargs.keys()\n split_kwargs_index = len_all_args - dict_len\n\n split_all_args = [split(arg, split_size = split_size) if exists(arg) and isinstance(arg, (torch.Tensor, Iterable)) else ((arg,) * num_chunks) for arg in all_args]\n chunk_sizes = num_to_groups(batch_size, split_size)\n\n for (chunk_size, *chunked_all_args) in tuple(zip(chunk_sizes, *split_all_args)):\n chunked_args, chunked_kwargs_values = chunked_all_args[:split_kwargs_index], chunked_all_args[split_kwargs_index:]\n chunked_kwargs = dict(tuple(zip(dict_keys, chunked_kwargs_values)))\n chunk_size_frac = chunk_size / batch_size\n yield chunk_size_frac, (chunked_args, chunked_kwargs)\n\n# imagen trainer\n\ndef imagen_sample_in_chunks(fn):\n @wraps(fn)\n def inner(self, *args, max_batch_size = None, **kwargs):\n if not exists(max_batch_size):\n return fn(self, *args, **kwargs)\n\n if self.imagen.unconditional:\n batch_size = kwargs.get('batch_size')\n batch_sizes = num_to_groups(batch_size, max_batch_size)\n outputs = [fn(self, *args, **{**kwargs, 'batch_size': sub_batch_size}) for sub_batch_size in batch_sizes]\n else:\n outputs = [fn(self, *chunked_args, **chunked_kwargs) for _, (chunked_args, chunked_kwargs) in 
split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs)]\n\n if isinstance(outputs[0], torch.Tensor):\n return torch.cat(outputs, dim = 0)\n\n return list(map(lambda t: torch.cat(t, dim = 0), list(zip(*outputs))))\n\n return inner\n\n\ndef restore_parts(state_dict_target, state_dict_from):\n for name, param in state_dict_from.items():\n\n if name not in state_dict_target:\n continue\n\n if param.size() == state_dict_target[name].size():\n state_dict_target[name].copy_(param)\n else:\n print(f\"layer {name}({param.size()} different than target: {state_dict_target[name].size()}\")\n\n return state_dict_target\n\n\nclass ImagenTrainer(nn.Module):\n locked = False\n\n def __init__(\n self,\n imagen = None,\n imagen_checkpoint_path = None,\n use_ema = True,\n lr = 1e-4,\n eps = 1e-8,\n beta1 = 0.9,\n beta2 = 0.99,\n max_grad_norm = None,\n group_wd_params = True,\n warmup_steps = None,\n cosine_decay_max_steps = None,\n only_train_unet_number = None,\n fp16 = False,\n precision = None,\n split_batches = True,\n dl_tuple_output_keywords_names = ('images', 'text_embeds', 'text_masks', 'cond_images'),\n verbose = True,\n split_valid_fraction = 0.025,\n split_valid_from_train = False,\n split_random_seed = 42,\n checkpoint_path = None,\n checkpoint_every = None,\n checkpoint_fs = None,\n fs_kwargs: dict = None,\n max_checkpoints_keep = 20,\n use_lion = False,\n **kwargs\n ):\n super().__init__()\n assert not ImagenTrainer.locked, 'ImagenTrainer can only be initialized once per process - for the sake of distributed training, you will now have to create a separate script to train each unet (or a script that accepts unet number as an argument)'\n assert exists(imagen) ^ exists(imagen_checkpoint_path), 'either imagen instance is passed into the trainer, or a checkpoint path that contains the imagen config'\n\n # determine filesystem, using fsspec, for saving to local filesystem or cloud\n\n self.fs = checkpoint_fs\n\n if not exists(self.fs):\n fs_kwargs = default(fs_kwargs, {})\n self.fs, _ = url_to_fs(default(checkpoint_path, './'), **fs_kwargs)\n\n assert isinstance(imagen, (Imagen, ElucidatedImagen))\n ema_kwargs, kwargs = groupby_prefix_and_trim('ema_', kwargs)\n\n # elucidated or not\n\n self.is_elucidated = isinstance(imagen, ElucidatedImagen)\n\n # create accelerator instance\n\n accelerate_kwargs, kwargs = groupby_prefix_and_trim('accelerate_', kwargs)\n\n assert not (fp16 and exists(precision)), 'either set fp16 = True or forward the precision (\"fp16\", \"bf16\") to Accelerator'\n accelerator_mixed_precision = default(precision, 'fp16' if fp16 else 'no')\n\n self.accelerator = Accelerator(**{\n 'split_batches': split_batches,\n 'mixed_precision': accelerator_mixed_precision,\n 'kwargs_handlers': [DistributedDataParallelKwargs(find_unused_parameters = True)]\n , **accelerate_kwargs})\n\n ImagenTrainer.locked = self.is_distributed\n\n # cast data to fp16 at training time if needed\n\n self.cast_half_at_training = accelerator_mixed_precision == 'fp16'\n\n # grad scaler must be managed outside of accelerator\n\n grad_scaler_enabled = fp16\n\n # imagen, unets and ema unets\n\n self.imagen = imagen\n self.num_unets = len(self.imagen.unets)\n\n self.use_ema = use_ema and self.is_main\n self.ema_unets = nn.ModuleList([])\n\n # keep track of what unet is being trained on\n # only going to allow 1 unet training at a time\n\n self.ema_unet_being_trained_index = -1 # keeps track of which ema unet is being trained on\n\n # data related functions\n\n self.train_dl_iter = None\n self.train_dl = 
None\n\n self.valid_dl_iter = None\n self.valid_dl = None\n\n self.dl_tuple_output_keywords_names = dl_tuple_output_keywords_names\n\n # auto splitting validation from training, if dataset is passed in\n\n self.split_valid_from_train = split_valid_from_train\n\n assert 0 <= split_valid_fraction <= 1, 'split valid fraction must be between 0 and 1'\n self.split_valid_fraction = split_valid_fraction\n self.split_random_seed = split_random_seed\n\n # be able to finely customize learning rate, weight decay\n # per unet\n\n lr, eps, warmup_steps, cosine_decay_max_steps = map(partial(cast_tuple, length = self.num_unets), (lr, eps, warmup_steps, cosine_decay_max_steps))\n\n for ind, (unet, unet_lr, unet_eps, unet_warmup_steps, unet_cosine_decay_max_steps) in enumerate(zip(self.imagen.unets, lr, eps, warmup_steps, cosine_decay_max_steps)):\n\n if use_lion:\n optimizer = Lion(\n unet.parameters(),\n lr = unet_lr,\n betas = (beta1, beta2),\n use_triton = True\n )\n else:\n optimizer = Adam(\n unet.parameters(),\n lr = unet_lr,\n eps = unet_eps,\n betas = (beta1, beta2),\n **kwargs\n )\n\n if self.use_ema:\n self.ema_unets.append(EMA(unet, **ema_kwargs))\n\n scaler = GradScaler(enabled = grad_scaler_enabled)\n\n scheduler = warmup_scheduler = None\n\n if exists(unet_cosine_decay_max_steps):\n scheduler = CosineAnnealingLR(optimizer, T_max = unet_cosine_decay_max_steps)\n\n if exists(unet_warmup_steps):\n warmup_scheduler = warmup.LinearWarmup(optimizer, warmup_period = unet_warmup_steps)\n\n if not exists(scheduler):\n scheduler = LambdaLR(optimizer, lr_lambda = lambda step: 1.0)\n\n # set on object\n\n setattr(self, f'optim{ind}', optimizer) # cannot use pytorch ModuleList for some reason with optimizers\n setattr(self, f'scaler{ind}', scaler)\n setattr(self, f'scheduler{ind}', scheduler)\n setattr(self, f'warmup{ind}', warmup_scheduler)\n\n # gradient clipping if needed\n\n self.max_grad_norm = max_grad_norm\n\n # step tracker and misc\n\n self.register_buffer('steps', torch.tensor([0] * self.num_unets))\n\n self.verbose = verbose\n\n # automatic set devices based on what accelerator decided\n\n self.imagen.to(self.device)\n self.to(self.device)\n\n # checkpointing\n\n assert not (exists(checkpoint_path) ^ exists(checkpoint_every))\n self.checkpoint_path = checkpoint_path\n self.checkpoint_every = checkpoint_every\n self.max_checkpoints_keep = max_checkpoints_keep\n\n self.can_checkpoint = self.is_local_main if isinstance(checkpoint_fs, LocalFileSystem) else self.is_main\n\n if exists(checkpoint_path) and self.can_checkpoint:\n bucket = url_to_bucket(checkpoint_path)\n\n if not self.fs.exists(bucket):\n self.fs.mkdir(bucket)\n\n self.load_from_checkpoint_folder()\n\n # only allowing training for unet\n\n self.only_train_unet_number = only_train_unet_number\n self.prepared = False\n\n\n def prepare(self):\n assert not self.prepared, f'The trainer is allready prepared'\n self.validate_and_set_unet_being_trained(self.only_train_unet_number)\n self.prepared = True\n # computed values\n\n @property\n def device(self):\n return self.accelerator.device\n\n @property\n def is_distributed(self):\n return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)\n\n @property\n def is_main(self):\n return self.accelerator.is_main_process\n\n @property\n def is_local_main(self):\n return self.accelerator.is_local_main_process\n\n @property\n def unwrapped_unet(self):\n return self.accelerator.unwrap_model(self.unet_being_trained)\n\n # optimizer helper functions\n\n 
def get_lr(self, unet_number):\n self.validate_unet_number(unet_number)\n unet_index = unet_number - 1\n\n optim = getattr(self, f'optim{unet_index}')\n\n return optim.param_groups[0]['lr']\n\n # function for allowing only one unet from being trained at a time\n\n def validate_and_set_unet_being_trained(self, unet_number = None):\n if exists(unet_number):\n self.validate_unet_number(unet_number)\n\n assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, 'you cannot only train on one unet at a time. you will need to save the trainer into a checkpoint, and resume training on a new unet'\n\n self.only_train_unet_number = unet_number\n self.imagen.only_train_unet_number = unet_number\n\n if not exists(unet_number):\n return\n\n self.wrap_unet(unet_number)\n\n def wrap_unet(self, unet_number):\n if hasattr(self, 'one_unet_wrapped'):\n return\n\n unet = self.imagen.get_unet(unet_number)\n unet_index = unet_number - 1\n\n optimizer = getattr(self, f'optim{unet_index}')\n scheduler = getattr(self, f'scheduler{unet_index}')\n\n if self.train_dl:\n self.unet_being_trained, self.train_dl, optimizer = self.accelerator.prepare(unet, self.train_dl, optimizer)\n else:\n self.unet_being_trained, optimizer = self.accelerator.prepare(unet, optimizer)\n\n if exists(scheduler):\n scheduler = self.accelerator.prepare(scheduler)\n\n setattr(self, f'optim{unet_index}', optimizer)\n setattr(self, f'scheduler{unet_index}', scheduler)\n\n self.one_unet_wrapped = True\n\n # hacking accelerator due to not having separate gradscaler per optimizer\n\n def set_accelerator_scaler(self, unet_number):\n unet_number = self.validate_unet_number(unet_number)\n scaler = getattr(self, f'scaler{unet_number - 1}')\n\n self.accelerator.scaler = scaler\n for optimizer in self.accelerator._optimizers:\n optimizer.scaler = scaler\n\n # helper print\n\n def print(self, msg):\n if not self.is_main:\n return\n\n if not self.verbose:\n return\n\n return self.accelerator.print(msg)\n\n # validating the unet number\n\n def validate_unet_number(self, unet_number = None):\n if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n assert 0 < unet_number <= self.num_unets, f'unet number should be in between 1 and {self.num_unets}'\n return unet_number\n\n # number of training steps taken\n\n def num_steps_taken(self, unet_number = None):\n if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n return self.steps[unet_number - 1].item()\n\n def print_untrained_unets(self):\n print_final_error = False\n\n for ind, (steps, unet) in enumerate(zip(self.steps.tolist(), self.imagen.unets)):\n if steps > 0 or isinstance(unet, NullUnet):\n continue\n\n self.print(f'unet {ind + 1} has not been trained')\n print_final_error = True\n\n if print_final_error:\n self.print('when sampling, you can pass stop_at_unet_number to stop early in the cascade, so it does not try to generate with untrained unets')\n\n # data related functions\n\n def add_train_dataloader(self, dl = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/60", "ground_truth": " if not exists(dl):\n return\n\n assert not exists(self.train_dl), 'training dataloader was already added'\n assert not self.prepared, f'You need to add the dataset before preperation'\n self.train_dl = dl\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 176, "lineno": 545, "function_name": "add_train_dataloader"}, "groundtruth": " if not exists(dl):\n return\n\n assert not 
exists(self.train_dl), 'training dataloader was already added'\n assert not self.prepared, f'You need to add the dataset before preperation'\n self.train_dl = dl\n"} +{"prompt": "], chunked_all_args[split_kwargs_index:]\n chunked_kwargs = dict(tuple(zip(dict_keys, chunked_kwargs_values)))\n chunk_size_frac = chunk_size / batch_size\n yield chunk_size_frac, (chunked_args, chunked_kwargs)\n\n# imagen trainer\n\ndef imagen_sample_in_chunks(fn):\n @wraps(fn)\n def inner(self, *args, max_batch_size = None, **kwargs):\n if not exists(max_batch_size):\n return fn(self, *args, **kwargs)\n\n if self.imagen.unconditional:\n batch_size = kwargs.get('batch_size')\n batch_sizes = num_to_groups(batch_size, max_batch_size)\n outputs = [fn(self, *args, **{**kwargs, 'batch_size': sub_batch_size}) for sub_batch_size in batch_sizes]\n else:\n outputs = [fn(self, *chunked_args, **chunked_kwargs) for _, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs)]\n\n if isinstance(outputs[0], torch.Tensor):\n return torch.cat(outputs, dim = 0)\n\n return list(map(lambda t: torch.cat(t, dim = 0), list(zip(*outputs))))\n\n return inner\n\n\ndef restore_parts(state_dict_target, state_dict_from):\n for name, param in state_dict_from.items():\n\n if name not in state_dict_target:\n continue\n\n if param.size() == state_dict_target[name].size():\n state_dict_target[name].copy_(param)\n else:\n print(f\"layer {name}({param.size()} different than target: {state_dict_target[name].size()}\")\n\n return state_dict_target\n\n\nclass ImagenTrainer(nn.Module):\n locked = False\n\n def __init__(\n self,\n imagen = None,\n imagen_checkpoint_path = None,\n use_ema = True,\n lr = 1e-4,\n eps = 1e-8,\n beta1 = 0.9,\n beta2 = 0.99,\n max_grad_norm = None,\n group_wd_params = True,\n warmup_steps = None,\n cosine_decay_max_steps = None,\n only_train_unet_number = None,\n fp16 = False,\n precision = None,\n split_batches = True,\n dl_tuple_output_keywords_names = ('images', 'text_embeds', 'text_masks', 'cond_images'),\n verbose = True,\n split_valid_fraction = 0.025,\n split_valid_from_train = False,\n split_random_seed = 42,\n checkpoint_path = None,\n checkpoint_every = None,\n checkpoint_fs = None,\n fs_kwargs: dict = None,\n max_checkpoints_keep = 20,\n use_lion = False,\n **kwargs\n ):\n super().__init__()\n assert not ImagenTrainer.locked, 'ImagenTrainer can only be initialized once per process - for the sake of distributed training, you will now have to create a separate script to train each unet (or a script that accepts unet number as an argument)'\n assert exists(imagen) ^ exists(imagen_checkpoint_path), 'either imagen instance is passed into the trainer, or a checkpoint path that contains the imagen config'\n\n # determine filesystem, using fsspec, for saving to local filesystem or cloud\n\n self.fs = checkpoint_fs\n\n if not exists(self.fs):\n fs_kwargs = default(fs_kwargs, {})\n self.fs, _ = url_to_fs(default(checkpoint_path, './'), **fs_kwargs)\n\n assert isinstance(imagen, (Imagen, ElucidatedImagen))\n ema_kwargs, kwargs = groupby_prefix_and_trim('ema_', kwargs)\n\n # elucidated or not\n\n self.is_elucidated = isinstance(imagen, ElucidatedImagen)\n\n # create accelerator instance\n\n accelerate_kwargs, kwargs = groupby_prefix_and_trim('accelerate_', kwargs)\n\n assert not (fp16 and exists(precision)), 'either set fp16 = True or forward the precision (\"fp16\", \"bf16\") to Accelerator'\n accelerator_mixed_precision = default(precision, 'fp16' if fp16 else 'no')\n\n 
self.accelerator = Accelerator(**{\n 'split_batches': split_batches,\n 'mixed_precision': accelerator_mixed_precision,\n 'kwargs_handlers': [DistributedDataParallelKwargs(find_unused_parameters = True)]\n , **accelerate_kwargs})\n\n ImagenTrainer.locked = self.is_distributed\n\n # cast data to fp16 at training time if needed\n\n self.cast_half_at_training = accelerator_mixed_precision == 'fp16'\n\n # grad scaler must be managed outside of accelerator\n\n grad_scaler_enabled = fp16\n\n # imagen, unets and ema unets\n\n self.imagen = imagen\n self.num_unets = len(self.imagen.unets)\n\n self.use_ema = use_ema and self.is_main\n self.ema_unets = nn.ModuleList([])\n\n # keep track of what unet is being trained on\n # only going to allow 1 unet training at a time\n\n self.ema_unet_being_trained_index = -1 # keeps track of which ema unet is being trained on\n\n # data related functions\n\n self.train_dl_iter = None\n self.train_dl = None\n\n self.valid_dl_iter = None\n self.valid_dl = None\n\n self.dl_tuple_output_keywords_names = dl_tuple_output_keywords_names\n\n # auto splitting validation from training, if dataset is passed in\n\n self.split_valid_from_train = split_valid_from_train\n\n assert 0 <= split_valid_fraction <= 1, 'split valid fraction must be between 0 and 1'\n self.split_valid_fraction = split_valid_fraction\n self.split_random_seed = split_random_seed\n\n # be able to finely customize learning rate, weight decay\n # per unet\n\n lr, eps, warmup_steps, cosine_decay_max_steps = map(partial(cast_tuple, length = self.num_unets), (lr, eps, warmup_steps, cosine_decay_max_steps))\n\n for ind, (unet, unet_lr, unet_eps, unet_warmup_steps, unet_cosine_decay_max_steps) in enumerate(zip(self.imagen.unets, lr, eps, warmup_steps, cosine_decay_max_steps)):\n\n if use_lion:\n optimizer = Lion(\n unet.parameters(),\n lr = unet_lr,\n betas = (beta1, beta2),\n use_triton = True\n )\n else:\n optimizer = Adam(\n unet.parameters(),\n lr = unet_lr,\n eps = unet_eps,\n betas = (beta1, beta2),\n **kwargs\n )\n\n if self.use_ema:\n self.ema_unets.append(EMA(unet, **ema_kwargs))\n\n scaler = GradScaler(enabled = grad_scaler_enabled)\n\n scheduler = warmup_scheduler = None\n\n if exists(unet_cosine_decay_max_steps):\n scheduler = CosineAnnealingLR(optimizer, T_max = unet_cosine_decay_max_steps)\n\n if exists(unet_warmup_steps):\n warmup_scheduler = warmup.LinearWarmup(optimizer, warmup_period = unet_warmup_steps)\n\n if not exists(scheduler):\n scheduler = LambdaLR(optimizer, lr_lambda = lambda step: 1.0)\n\n # set on object\n\n setattr(self, f'optim{ind}', optimizer) # cannot use pytorch ModuleList for some reason with optimizers\n setattr(self, f'scaler{ind}', scaler)\n setattr(self, f'scheduler{ind}', scheduler)\n setattr(self, f'warmup{ind}', warmup_scheduler)\n\n # gradient clipping if needed\n\n self.max_grad_norm = max_grad_norm\n\n # step tracker and misc\n\n self.register_buffer('steps', torch.tensor([0] * self.num_unets))\n\n self.verbose = verbose\n\n # automatic set devices based on what accelerator decided\n\n self.imagen.to(self.device)\n self.to(self.device)\n\n # checkpointing\n\n assert not (exists(checkpoint_path) ^ exists(checkpoint_every))\n self.checkpoint_path = checkpoint_path\n self.checkpoint_every = checkpoint_every\n self.max_checkpoints_keep = max_checkpoints_keep\n\n self.can_checkpoint = self.is_local_main if isinstance(checkpoint_fs, LocalFileSystem) else self.is_main\n\n if exists(checkpoint_path) and self.can_checkpoint:\n bucket = url_to_bucket(checkpoint_path)\n\n if not 
self.fs.exists(bucket):\n self.fs.mkdir(bucket)\n\n self.load_from_checkpoint_folder()\n\n # only allowing training for unet\n\n self.only_train_unet_number = only_train_unet_number\n self.prepared = False\n\n\n def prepare(self):\n assert not self.prepared, f'The trainer is allready prepared'\n self.validate_and_set_unet_being_trained(self.only_train_unet_number)\n self.prepared = True\n # computed values\n\n @property\n def device(self):\n return self.accelerator.device\n\n @property\n def is_distributed(self):\n return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)\n\n @property\n def is_main(self):\n return self.accelerator.is_main_process\n\n @property\n def is_local_main(self):\n return self.accelerator.is_local_main_process\n\n @property\n def unwrapped_unet(self):\n return self.accelerator.unwrap_model(self.unet_being_trained)\n\n # optimizer helper functions\n\n def get_lr(self, unet_number):\n self.validate_unet_number(unet_number)\n unet_index = unet_number - 1\n\n optim = getattr(self, f'optim{unet_index}')\n\n return optim.param_groups[0]['lr']\n\n # function for allowing only one unet from being trained at a time\n\n def validate_and_set_unet_being_trained(self, unet_number = None):\n if exists(unet_number):\n self.validate_unet_number(unet_number)\n\n assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, 'you cannot only train on one unet at a time. you will need to save the trainer into a checkpoint, and resume training on a new unet'\n\n self.only_train_unet_number = unet_number\n self.imagen.only_train_unet_number = unet_number\n\n if not exists(unet_number):\n return\n\n self.wrap_unet(unet_number)\n\n def wrap_unet(self, unet_number):\n if hasattr(self, 'one_unet_wrapped'):\n return\n\n unet = self.imagen.get_unet(unet_number)\n unet_index = unet_number - 1\n\n optimizer = getattr(self, f'optim{unet_index}')\n scheduler = getattr(self, f'scheduler{unet_index}')\n\n if self.train_dl:\n self.unet_being_trained, self.train_dl, optimizer = self.accelerator.prepare(unet, self.train_dl, optimizer)\n else:\n self.unet_being_trained, optimizer = self.accelerator.prepare(unet, optimizer)\n\n if exists(scheduler):\n scheduler = self.accelerator.prepare(scheduler)\n\n setattr(self, f'optim{unet_index}', optimizer)\n setattr(self, f'scheduler{unet_index}', scheduler)\n\n self.one_unet_wrapped = True\n\n # hacking accelerator due to not having separate gradscaler per optimizer\n\n def set_accelerator_scaler(self, unet_number):\n unet_number = self.validate_unet_number(unet_number)\n scaler = getattr(self, f'scaler{unet_number - 1}')\n\n self.accelerator.scaler = scaler\n for optimizer in self.accelerator._optimizers:\n optimizer.scaler = scaler\n\n # helper print\n\n def print(self, msg):\n if not self.is_main:\n return\n\n if not self.verbose:\n return\n\n return self.accelerator.print(msg)\n\n # validating the unet number\n\n def validate_unet_number(self, unet_number = None):\n if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n assert 0 < unet_number <= self.num_unets, f'unet number should be in between 1 and {self.num_unets}'\n return unet_number\n\n # number of training steps taken\n\n def num_steps_taken(self, unet_number = None):\n if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n return self.steps[unet_number - 1].item()\n\n def print_untrained_unets(self):\n print_final_error = False\n\n for ind, (steps, unet) in 
enumerate(zip(self.steps.tolist(), self.imagen.unets)):\n if steps > 0 or isinstance(unet, NullUnet):\n continue\n\n self.print(f'unet {ind + 1} has not been trained')\n print_final_error = True\n\n if print_final_error:\n self.print('when sampling, you can pass stop_at_unet_number to stop early in the cascade, so it does not try to generate with untrained unets')\n\n # data related functions\n\n def add_train_dataloader(self, dl = None):\n if not exists(dl):\n return\n\n assert not exists(self.train_dl), 'training dataloader was already added'\n assert not self.prepared, f'You need to add the dataset before preperation'\n self.train_dl = dl\n\n def add_valid_dataloader(self, dl):\n if not exists(dl):\n return\n\n assert not exists(self.valid_dl), 'validation dataloader was already added'\n assert not self.prepared, f'You need to add the dataset before preperation'\n self.valid_dl = dl\n\n def add_train_dataset(self, ds = None, *, batch_size, **dl_kwargs):", "metadata": {"task_id": "lucidrains--imagen-pytorch/61", "ground_truth": " if not exists(ds):\n return\n\n assert not exists(self.train_dl), 'training dataloader was already added'\n\n valid_ds = None\n if self.split_valid_from_train:\n train_size = int((1 - self.split_valid_fraction) * len(ds))\n valid_size = len(ds) - train_size\n\n ds, valid_ds = random_split(ds, [train_size, valid_size], generator = torch.Generator().manual_seed(self.split_random_seed))\n self.print(f'training with dataset of {len(ds)} samples and validating with randomly splitted {len(valid_ds)} samples')\n\n dl = DataLoader(ds, batch_size = batch_size, **dl_kwargs)\n self.add_train_dataloader(dl)\n\n if not self.split_valid_from_train:\n return\n\n self.add_valid_dataset(valid_ds, batch_size = batch_size, **dl_kwargs)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 184, "lineno": 561, "function_name": "add_train_dataset"}, "groundtruth": " if not exists(ds):\n return\n\n assert not exists(self.train_dl), 'training dataloader was already added'\n\n valid_ds = None\n if self.split_valid_from_train:\n train_size = int((1 - self.split_valid_fraction) * len(ds))\n valid_size = len(ds) - train_size\n\n ds, valid_ds = random_split(ds, [train_size, valid_size], generator = torch.Generator().manual_seed(self.split_random_seed))\n self.print(f'training with dataset of {len(ds)} samples and validating with randomly splitted {len(valid_ds)} samples')\n\n dl = DataLoader(ds, batch_size = batch_size, **dl_kwargs)\n self.add_train_dataloader(dl)\n\n if not self.split_valid_from_train:\n return\n\n self.add_valid_dataset(valid_ds, batch_size = batch_size, **dl_kwargs)\n"} +{"prompt": "), list(zip(*outputs))))\n\n return inner\n\n\ndef restore_parts(state_dict_target, state_dict_from):\n for name, param in state_dict_from.items():\n\n if name not in state_dict_target:\n continue\n\n if param.size() == state_dict_target[name].size():\n state_dict_target[name].copy_(param)\n else:\n print(f\"layer {name}({param.size()} different than target: {state_dict_target[name].size()}\")\n\n return state_dict_target\n\n\nclass ImagenTrainer(nn.Module):\n locked = False\n\n def __init__(\n self,\n imagen = None,\n imagen_checkpoint_path = None,\n use_ema = True,\n lr = 1e-4,\n eps = 1e-8,\n beta1 = 0.9,\n beta2 = 0.99,\n max_grad_norm = None,\n group_wd_params = True,\n warmup_steps = None,\n cosine_decay_max_steps = None,\n only_train_unet_number = None,\n fp16 = False,\n precision = None,\n split_batches = True,\n 
dl_tuple_output_keywords_names = ('images', 'text_embeds', 'text_masks', 'cond_images'),\n verbose = True,\n split_valid_fraction = 0.025,\n split_valid_from_train = False,\n split_random_seed = 42,\n checkpoint_path = None,\n checkpoint_every = None,\n checkpoint_fs = None,\n fs_kwargs: dict = None,\n max_checkpoints_keep = 20,\n use_lion = False,\n **kwargs\n ):\n super().__init__()\n assert not ImagenTrainer.locked, 'ImagenTrainer can only be initialized once per process - for the sake of distributed training, you will now have to create a separate script to train each unet (or a script that accepts unet number as an argument)'\n assert exists(imagen) ^ exists(imagen_checkpoint_path), 'either imagen instance is passed into the trainer, or a checkpoint path that contains the imagen config'\n\n # determine filesystem, using fsspec, for saving to local filesystem or cloud\n\n self.fs = checkpoint_fs\n\n if not exists(self.fs):\n fs_kwargs = default(fs_kwargs, {})\n self.fs, _ = url_to_fs(default(checkpoint_path, './'), **fs_kwargs)\n\n assert isinstance(imagen, (Imagen, ElucidatedImagen))\n ema_kwargs, kwargs = groupby_prefix_and_trim('ema_', kwargs)\n\n # elucidated or not\n\n self.is_elucidated = isinstance(imagen, ElucidatedImagen)\n\n # create accelerator instance\n\n accelerate_kwargs, kwargs = groupby_prefix_and_trim('accelerate_', kwargs)\n\n assert not (fp16 and exists(precision)), 'either set fp16 = True or forward the precision (\"fp16\", \"bf16\") to Accelerator'\n accelerator_mixed_precision = default(precision, 'fp16' if fp16 else 'no')\n\n self.accelerator = Accelerator(**{\n 'split_batches': split_batches,\n 'mixed_precision': accelerator_mixed_precision,\n 'kwargs_handlers': [DistributedDataParallelKwargs(find_unused_parameters = True)]\n , **accelerate_kwargs})\n\n ImagenTrainer.locked = self.is_distributed\n\n # cast data to fp16 at training time if needed\n\n self.cast_half_at_training = accelerator_mixed_precision == 'fp16'\n\n # grad scaler must be managed outside of accelerator\n\n grad_scaler_enabled = fp16\n\n # imagen, unets and ema unets\n\n self.imagen = imagen\n self.num_unets = len(self.imagen.unets)\n\n self.use_ema = use_ema and self.is_main\n self.ema_unets = nn.ModuleList([])\n\n # keep track of what unet is being trained on\n # only going to allow 1 unet training at a time\n\n self.ema_unet_being_trained_index = -1 # keeps track of which ema unet is being trained on\n\n # data related functions\n\n self.train_dl_iter = None\n self.train_dl = None\n\n self.valid_dl_iter = None\n self.valid_dl = None\n\n self.dl_tuple_output_keywords_names = dl_tuple_output_keywords_names\n\n # auto splitting validation from training, if dataset is passed in\n\n self.split_valid_from_train = split_valid_from_train\n\n assert 0 <= split_valid_fraction <= 1, 'split valid fraction must be between 0 and 1'\n self.split_valid_fraction = split_valid_fraction\n self.split_random_seed = split_random_seed\n\n # be able to finely customize learning rate, weight decay\n # per unet\n\n lr, eps, warmup_steps, cosine_decay_max_steps = map(partial(cast_tuple, length = self.num_unets), (lr, eps, warmup_steps, cosine_decay_max_steps))\n\n for ind, (unet, unet_lr, unet_eps, unet_warmup_steps, unet_cosine_decay_max_steps) in enumerate(zip(self.imagen.unets, lr, eps, warmup_steps, cosine_decay_max_steps)):\n\n if use_lion:\n optimizer = Lion(\n unet.parameters(),\n lr = unet_lr,\n betas = (beta1, beta2),\n use_triton = True\n )\n else:\n optimizer = Adam(\n unet.parameters(),\n lr = unet_lr,\n 
eps = unet_eps,\n betas = (beta1, beta2),\n **kwargs\n )\n\n if self.use_ema:\n self.ema_unets.append(EMA(unet, **ema_kwargs))\n\n scaler = GradScaler(enabled = grad_scaler_enabled)\n\n scheduler = warmup_scheduler = None\n\n if exists(unet_cosine_decay_max_steps):\n scheduler = CosineAnnealingLR(optimizer, T_max = unet_cosine_decay_max_steps)\n\n if exists(unet_warmup_steps):\n warmup_scheduler = warmup.LinearWarmup(optimizer, warmup_period = unet_warmup_steps)\n\n if not exists(scheduler):\n scheduler = LambdaLR(optimizer, lr_lambda = lambda step: 1.0)\n\n # set on object\n\n setattr(self, f'optim{ind}', optimizer) # cannot use pytorch ModuleList for some reason with optimizers\n setattr(self, f'scaler{ind}', scaler)\n setattr(self, f'scheduler{ind}', scheduler)\n setattr(self, f'warmup{ind}', warmup_scheduler)\n\n # gradient clipping if needed\n\n self.max_grad_norm = max_grad_norm\n\n # step tracker and misc\n\n self.register_buffer('steps', torch.tensor([0] * self.num_unets))\n\n self.verbose = verbose\n\n # automatic set devices based on what accelerator decided\n\n self.imagen.to(self.device)\n self.to(self.device)\n\n # checkpointing\n\n assert not (exists(checkpoint_path) ^ exists(checkpoint_every))\n self.checkpoint_path = checkpoint_path\n self.checkpoint_every = checkpoint_every\n self.max_checkpoints_keep = max_checkpoints_keep\n\n self.can_checkpoint = self.is_local_main if isinstance(checkpoint_fs, LocalFileSystem) else self.is_main\n\n if exists(checkpoint_path) and self.can_checkpoint:\n bucket = url_to_bucket(checkpoint_path)\n\n if not self.fs.exists(bucket):\n self.fs.mkdir(bucket)\n\n self.load_from_checkpoint_folder()\n\n # only allowing training for unet\n\n self.only_train_unet_number = only_train_unet_number\n self.prepared = False\n\n\n def prepare(self):\n assert not self.prepared, f'The trainer is allready prepared'\n self.validate_and_set_unet_being_trained(self.only_train_unet_number)\n self.prepared = True\n # computed values\n\n @property\n def device(self):\n return self.accelerator.device\n\n @property\n def is_distributed(self):\n return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)\n\n @property\n def is_main(self):\n return self.accelerator.is_main_process\n\n @property\n def is_local_main(self):\n return self.accelerator.is_local_main_process\n\n @property\n def unwrapped_unet(self):\n return self.accelerator.unwrap_model(self.unet_being_trained)\n\n # optimizer helper functions\n\n def get_lr(self, unet_number):\n self.validate_unet_number(unet_number)\n unet_index = unet_number - 1\n\n optim = getattr(self, f'optim{unet_index}')\n\n return optim.param_groups[0]['lr']\n\n # function for allowing only one unet from being trained at a time\n\n def validate_and_set_unet_being_trained(self, unet_number = None):\n if exists(unet_number):\n self.validate_unet_number(unet_number)\n\n assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, 'you cannot only train on one unet at a time. 
you will need to save the trainer into a checkpoint, and resume training on a new unet'\n\n self.only_train_unet_number = unet_number\n self.imagen.only_train_unet_number = unet_number\n\n if not exists(unet_number):\n return\n\n self.wrap_unet(unet_number)\n\n def wrap_unet(self, unet_number):\n if hasattr(self, 'one_unet_wrapped'):\n return\n\n unet = self.imagen.get_unet(unet_number)\n unet_index = unet_number - 1\n\n optimizer = getattr(self, f'optim{unet_index}')\n scheduler = getattr(self, f'scheduler{unet_index}')\n\n if self.train_dl:\n self.unet_being_trained, self.train_dl, optimizer = self.accelerator.prepare(unet, self.train_dl, optimizer)\n else:\n self.unet_being_trained, optimizer = self.accelerator.prepare(unet, optimizer)\n\n if exists(scheduler):\n scheduler = self.accelerator.prepare(scheduler)\n\n setattr(self, f'optim{unet_index}', optimizer)\n setattr(self, f'scheduler{unet_index}', scheduler)\n\n self.one_unet_wrapped = True\n\n # hacking accelerator due to not having separate gradscaler per optimizer\n\n def set_accelerator_scaler(self, unet_number):\n unet_number = self.validate_unet_number(unet_number)\n scaler = getattr(self, f'scaler{unet_number - 1}')\n\n self.accelerator.scaler = scaler\n for optimizer in self.accelerator._optimizers:\n optimizer.scaler = scaler\n\n # helper print\n\n def print(self, msg):\n if not self.is_main:\n return\n\n if not self.verbose:\n return\n\n return self.accelerator.print(msg)\n\n # validating the unet number\n\n def validate_unet_number(self, unet_number = None):\n if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n assert 0 < unet_number <= self.num_unets, f'unet number should be in between 1 and {self.num_unets}'\n return unet_number\n\n # number of training steps taken\n\n def num_steps_taken(self, unet_number = None):\n if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n return self.steps[unet_number - 1].item()\n\n def print_untrained_unets(self):\n print_final_error = False\n\n for ind, (steps, unet) in enumerate(zip(self.steps.tolist(), self.imagen.unets)):\n if steps > 0 or isinstance(unet, NullUnet):\n continue\n\n self.print(f'unet {ind + 1} has not been trained')\n print_final_error = True\n\n if print_final_error:\n self.print('when sampling, you can pass stop_at_unet_number to stop early in the cascade, so it does not try to generate with untrained unets')\n\n # data related functions\n\n def add_train_dataloader(self, dl = None):\n if not exists(dl):\n return\n\n assert not exists(self.train_dl), 'training dataloader was already added'\n assert not self.prepared, f'You need to add the dataset before preperation'\n self.train_dl = dl\n\n def add_valid_dataloader(self, dl):\n if not exists(dl):\n return\n\n assert not exists(self.valid_dl), 'validation dataloader was already added'\n assert not self.prepared, f'You need to add the dataset before preperation'\n self.valid_dl = dl\n\n def add_train_dataset(self, ds = None, *, batch_size, **dl_kwargs):\n if not exists(ds):\n return\n\n assert not exists(self.train_dl), 'training dataloader was already added'\n\n valid_ds = None\n if self.split_valid_from_train:\n train_size = int((1 - self.split_valid_fraction) * len(ds))\n valid_size = len(ds) - train_size\n\n ds, valid_ds = random_split(ds, [train_size, valid_size], generator = torch.Generator().manual_seed(self.split_random_seed))\n self.print(f'training with dataset of {len(ds)} samples and validating with randomly splitted {len(valid_ds)} samples')\n\n dl = DataLoader(ds, 
batch_size = batch_size, **dl_kwargs)\n self.add_train_dataloader(dl)\n\n if not self.split_valid_from_train:\n return\n\n self.add_valid_dataset(valid_ds, batch_size = batch_size, **dl_kwargs)\n\n def add_valid_dataset(self, ds, *, batch_size, **dl_kwargs):\n if not exists(ds):\n return\n\n assert not exists(self.valid_dl), 'validation dataloader was already added'\n\n dl = DataLoader(ds, batch_size = batch_size, **dl_kwargs)\n self.add_valid_dataloader(dl)\n\n def create_train_iter(self):", "metadata": {"task_id": "lucidrains--imagen-pytorch/62", "ground_truth": " assert exists(self.train_dl), 'training dataloader has not been registered with the trainer yet'\n\n if exists(self.train_dl_iter):\n return\n\n self.train_dl_iter = cycle(self.train_dl)\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 207, "lineno": 592, "function_name": "create_train_iter"}, "groundtruth": " assert exists(self.train_dl), 'training dataloader has not been registered with the trainer yet'\n\n if exists(self.train_dl_iter):\n return\n\n self.train_dl_iter = cycle(self.train_dl)\n"} +{"prompt": " locked = False\n\n def __init__(\n self,\n imagen = None,\n imagen_checkpoint_path = None,\n use_ema = True,\n lr = 1e-4,\n eps = 1e-8,\n beta1 = 0.9,\n beta2 = 0.99,\n max_grad_norm = None,\n group_wd_params = True,\n warmup_steps = None,\n cosine_decay_max_steps = None,\n only_train_unet_number = None,\n fp16 = False,\n precision = None,\n split_batches = True,\n dl_tuple_output_keywords_names = ('images', 'text_embeds', 'text_masks', 'cond_images'),\n verbose = True,\n split_valid_fraction = 0.025,\n split_valid_from_train = False,\n split_random_seed = 42,\n checkpoint_path = None,\n checkpoint_every = None,\n checkpoint_fs = None,\n fs_kwargs: dict = None,\n max_checkpoints_keep = 20,\n use_lion = False,\n **kwargs\n ):\n super().__init__()\n assert not ImagenTrainer.locked, 'ImagenTrainer can only be initialized once per process - for the sake of distributed training, you will now have to create a separate script to train each unet (or a script that accepts unet number as an argument)'\n assert exists(imagen) ^ exists(imagen_checkpoint_path), 'either imagen instance is passed into the trainer, or a checkpoint path that contains the imagen config'\n\n # determine filesystem, using fsspec, for saving to local filesystem or cloud\n\n self.fs = checkpoint_fs\n\n if not exists(self.fs):\n fs_kwargs = default(fs_kwargs, {})\n self.fs, _ = url_to_fs(default(checkpoint_path, './'), **fs_kwargs)\n\n assert isinstance(imagen, (Imagen, ElucidatedImagen))\n ema_kwargs, kwargs = groupby_prefix_and_trim('ema_', kwargs)\n\n # elucidated or not\n\n self.is_elucidated = isinstance(imagen, ElucidatedImagen)\n\n # create accelerator instance\n\n accelerate_kwargs, kwargs = groupby_prefix_and_trim('accelerate_', kwargs)\n\n assert not (fp16 and exists(precision)), 'either set fp16 = True or forward the precision (\"fp16\", \"bf16\") to Accelerator'\n accelerator_mixed_precision = default(precision, 'fp16' if fp16 else 'no')\n\n self.accelerator = Accelerator(**{\n 'split_batches': split_batches,\n 'mixed_precision': accelerator_mixed_precision,\n 'kwargs_handlers': [DistributedDataParallelKwargs(find_unused_parameters = True)]\n , **accelerate_kwargs})\n\n ImagenTrainer.locked = self.is_distributed\n\n # cast data to fp16 at training time if needed\n\n self.cast_half_at_training = accelerator_mixed_precision == 'fp16'\n\n # grad scaler must be managed outside of accelerator\n\n 
grad_scaler_enabled = fp16\n\n # imagen, unets and ema unets\n\n self.imagen = imagen\n self.num_unets = len(self.imagen.unets)\n\n self.use_ema = use_ema and self.is_main\n self.ema_unets = nn.ModuleList([])\n\n # keep track of what unet is being trained on\n # only going to allow 1 unet training at a time\n\n self.ema_unet_being_trained_index = -1 # keeps track of which ema unet is being trained on\n\n # data related functions\n\n self.train_dl_iter = None\n self.train_dl = None\n\n self.valid_dl_iter = None\n self.valid_dl = None\n\n self.dl_tuple_output_keywords_names = dl_tuple_output_keywords_names\n\n # auto splitting validation from training, if dataset is passed in\n\n self.split_valid_from_train = split_valid_from_train\n\n assert 0 <= split_valid_fraction <= 1, 'split valid fraction must be between 0 and 1'\n self.split_valid_fraction = split_valid_fraction\n self.split_random_seed = split_random_seed\n\n # be able to finely customize learning rate, weight decay\n # per unet\n\n lr, eps, warmup_steps, cosine_decay_max_steps = map(partial(cast_tuple, length = self.num_unets), (lr, eps, warmup_steps, cosine_decay_max_steps))\n\n for ind, (unet, unet_lr, unet_eps, unet_warmup_steps, unet_cosine_decay_max_steps) in enumerate(zip(self.imagen.unets, lr, eps, warmup_steps, cosine_decay_max_steps)):\n\n if use_lion:\n optimizer = Lion(\n unet.parameters(),\n lr = unet_lr,\n betas = (beta1, beta2),\n use_triton = True\n )\n else:\n optimizer = Adam(\n unet.parameters(),\n lr = unet_lr,\n eps = unet_eps,\n betas = (beta1, beta2),\n **kwargs\n )\n\n if self.use_ema:\n self.ema_unets.append(EMA(unet, **ema_kwargs))\n\n scaler = GradScaler(enabled = grad_scaler_enabled)\n\n scheduler = warmup_scheduler = None\n\n if exists(unet_cosine_decay_max_steps):\n scheduler = CosineAnnealingLR(optimizer, T_max = unet_cosine_decay_max_steps)\n\n if exists(unet_warmup_steps):\n warmup_scheduler = warmup.LinearWarmup(optimizer, warmup_period = unet_warmup_steps)\n\n if not exists(scheduler):\n scheduler = LambdaLR(optimizer, lr_lambda = lambda step: 1.0)\n\n # set on object\n\n setattr(self, f'optim{ind}', optimizer) # cannot use pytorch ModuleList for some reason with optimizers\n setattr(self, f'scaler{ind}', scaler)\n setattr(self, f'scheduler{ind}', scheduler)\n setattr(self, f'warmup{ind}', warmup_scheduler)\n\n # gradient clipping if needed\n\n self.max_grad_norm = max_grad_norm\n\n # step tracker and misc\n\n self.register_buffer('steps', torch.tensor([0] * self.num_unets))\n\n self.verbose = verbose\n\n # automatic set devices based on what accelerator decided\n\n self.imagen.to(self.device)\n self.to(self.device)\n\n # checkpointing\n\n assert not (exists(checkpoint_path) ^ exists(checkpoint_every))\n self.checkpoint_path = checkpoint_path\n self.checkpoint_every = checkpoint_every\n self.max_checkpoints_keep = max_checkpoints_keep\n\n self.can_checkpoint = self.is_local_main if isinstance(checkpoint_fs, LocalFileSystem) else self.is_main\n\n if exists(checkpoint_path) and self.can_checkpoint:\n bucket = url_to_bucket(checkpoint_path)\n\n if not self.fs.exists(bucket):\n self.fs.mkdir(bucket)\n\n self.load_from_checkpoint_folder()\n\n # only allowing training for unet\n\n self.only_train_unet_number = only_train_unet_number\n self.prepared = False\n\n\n def prepare(self):\n assert not self.prepared, f'The trainer is allready prepared'\n self.validate_and_set_unet_being_trained(self.only_train_unet_number)\n self.prepared = True\n # computed values\n\n @property\n def device(self):\n return 
self.accelerator.device\n\n @property\n def is_distributed(self):\n return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)\n\n @property\n def is_main(self):\n return self.accelerator.is_main_process\n\n @property\n def is_local_main(self):\n return self.accelerator.is_local_main_process\n\n @property\n def unwrapped_unet(self):\n return self.accelerator.unwrap_model(self.unet_being_trained)\n\n # optimizer helper functions\n\n def get_lr(self, unet_number):\n self.validate_unet_number(unet_number)\n unet_index = unet_number - 1\n\n optim = getattr(self, f'optim{unet_index}')\n\n return optim.param_groups[0]['lr']\n\n # function for allowing only one unet from being trained at a time\n\n def validate_and_set_unet_being_trained(self, unet_number = None):\n if exists(unet_number):\n self.validate_unet_number(unet_number)\n\n assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, 'you cannot only train on one unet at a time. you will need to save the trainer into a checkpoint, and resume training on a new unet'\n\n self.only_train_unet_number = unet_number\n self.imagen.only_train_unet_number = unet_number\n\n if not exists(unet_number):\n return\n\n self.wrap_unet(unet_number)\n\n def wrap_unet(self, unet_number):\n if hasattr(self, 'one_unet_wrapped'):\n return\n\n unet = self.imagen.get_unet(unet_number)\n unet_index = unet_number - 1\n\n optimizer = getattr(self, f'optim{unet_index}')\n scheduler = getattr(self, f'scheduler{unet_index}')\n\n if self.train_dl:\n self.unet_being_trained, self.train_dl, optimizer = self.accelerator.prepare(unet, self.train_dl, optimizer)\n else:\n self.unet_being_trained, optimizer = self.accelerator.prepare(unet, optimizer)\n\n if exists(scheduler):\n scheduler = self.accelerator.prepare(scheduler)\n\n setattr(self, f'optim{unet_index}', optimizer)\n setattr(self, f'scheduler{unet_index}', scheduler)\n\n self.one_unet_wrapped = True\n\n # hacking accelerator due to not having separate gradscaler per optimizer\n\n def set_accelerator_scaler(self, unet_number):\n unet_number = self.validate_unet_number(unet_number)\n scaler = getattr(self, f'scaler{unet_number - 1}')\n\n self.accelerator.scaler = scaler\n for optimizer in self.accelerator._optimizers:\n optimizer.scaler = scaler\n\n # helper print\n\n def print(self, msg):\n if not self.is_main:\n return\n\n if not self.verbose:\n return\n\n return self.accelerator.print(msg)\n\n # validating the unet number\n\n def validate_unet_number(self, unet_number = None):\n if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n assert 0 < unet_number <= self.num_unets, f'unet number should be in between 1 and {self.num_unets}'\n return unet_number\n\n # number of training steps taken\n\n def num_steps_taken(self, unet_number = None):\n if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n return self.steps[unet_number - 1].item()\n\n def print_untrained_unets(self):\n print_final_error = False\n\n for ind, (steps, unet) in enumerate(zip(self.steps.tolist(), self.imagen.unets)):\n if steps > 0 or isinstance(unet, NullUnet):\n continue\n\n self.print(f'unet {ind + 1} has not been trained')\n print_final_error = True\n\n if print_final_error:\n self.print('when sampling, you can pass stop_at_unet_number to stop early in the cascade, so it does not try to generate with untrained unets')\n\n # data related functions\n\n def add_train_dataloader(self, dl = None):\n if not exists(dl):\n return\n\n assert 
not exists(self.train_dl), 'training dataloader was already added'\n assert not self.prepared, f'You need to add the dataset before preperation'\n self.train_dl = dl\n\n def add_valid_dataloader(self, dl):\n if not exists(dl):\n return\n\n assert not exists(self.valid_dl), 'validation dataloader was already added'\n assert not self.prepared, f'You need to add the dataset before preperation'\n self.valid_dl = dl\n\n def add_train_dataset(self, ds = None, *, batch_size, **dl_kwargs):\n if not exists(ds):\n return\n\n assert not exists(self.train_dl), 'training dataloader was already added'\n\n valid_ds = None\n if self.split_valid_from_train:\n train_size = int((1 - self.split_valid_fraction) * len(ds))\n valid_size = len(ds) - train_size\n\n ds, valid_ds = random_split(ds, [train_size, valid_size], generator = torch.Generator().manual_seed(self.split_random_seed))\n self.print(f'training with dataset of {len(ds)} samples and validating with randomly splitted {len(valid_ds)} samples')\n\n dl = DataLoader(ds, batch_size = batch_size, **dl_kwargs)\n self.add_train_dataloader(dl)\n\n if not self.split_valid_from_train:\n return\n\n self.add_valid_dataset(valid_ds, batch_size = batch_size, **dl_kwargs)\n\n def add_valid_dataset(self, ds, *, batch_size, **dl_kwargs):\n if not exists(ds):\n return\n\n assert not exists(self.valid_dl), 'validation dataloader was already added'\n\n dl = DataLoader(ds, batch_size = batch_size, **dl_kwargs)\n self.add_valid_dataloader(dl)\n\n def create_train_iter(self):\n assert exists(self.train_dl), 'training dataloader has not been registered with the trainer yet'\n\n if exists(self.train_dl_iter):\n return\n\n self.train_dl_iter = cycle(self.train_dl)\n\n def create_valid_iter(self):\n assert exists(self.valid_dl), 'validation dataloader has not been registered with the trainer yet'\n\n if exists(self.valid_dl_iter):\n return\n\n self.valid_dl_iter = cycle(self.valid_dl)\n\n def train_step(self, unet_number = None, **kwargs):", "metadata": {"task_id": "lucidrains--imagen-pytorch/63", "ground_truth": " if not self.prepared:\n self.prepare()\n self.create_train_iter()\n loss = self.step_with_dl_iter(self.train_dl_iter, unet_number = unet_number, **kwargs)\n self.update(unet_number = unet_number)\n return loss\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 227, "lineno": 608, "function_name": "train_step"}, "groundtruth": " if not self.prepared:\n self.prepare()\n self.create_train_iter()\n loss = self.step_with_dl_iter(self.train_dl_iter, unet_number = unet_number, **kwargs)\n self.update(unet_number = unet_number)\n return loss\n"} +{"prompt": " split_random_seed = 42,\n checkpoint_path = None,\n checkpoint_every = None,\n checkpoint_fs = None,\n fs_kwargs: dict = None,\n max_checkpoints_keep = 20,\n use_lion = False,\n **kwargs\n ):\n super().__init__()\n assert not ImagenTrainer.locked, 'ImagenTrainer can only be initialized once per process - for the sake of distributed training, you will now have to create a separate script to train each unet (or a script that accepts unet number as an argument)'\n assert exists(imagen) ^ exists(imagen_checkpoint_path), 'either imagen instance is passed into the trainer, or a checkpoint path that contains the imagen config'\n\n # determine filesystem, using fsspec, for saving to local filesystem or cloud\n\n self.fs = checkpoint_fs\n\n if not exists(self.fs):\n fs_kwargs = default(fs_kwargs, {})\n self.fs, _ = url_to_fs(default(checkpoint_path, './'), **fs_kwargs)\n\n 
assert isinstance(imagen, (Imagen, ElucidatedImagen))\n ema_kwargs, kwargs = groupby_prefix_and_trim('ema_', kwargs)\n\n # elucidated or not\n\n self.is_elucidated = isinstance(imagen, ElucidatedImagen)\n\n # create accelerator instance\n\n accelerate_kwargs, kwargs = groupby_prefix_and_trim('accelerate_', kwargs)\n\n assert not (fp16 and exists(precision)), 'either set fp16 = True or forward the precision (\"fp16\", \"bf16\") to Accelerator'\n accelerator_mixed_precision = default(precision, 'fp16' if fp16 else 'no')\n\n self.accelerator = Accelerator(**{\n 'split_batches': split_batches,\n 'mixed_precision': accelerator_mixed_precision,\n 'kwargs_handlers': [DistributedDataParallelKwargs(find_unused_parameters = True)]\n , **accelerate_kwargs})\n\n ImagenTrainer.locked = self.is_distributed\n\n # cast data to fp16 at training time if needed\n\n self.cast_half_at_training = accelerator_mixed_precision == 'fp16'\n\n # grad scaler must be managed outside of accelerator\n\n grad_scaler_enabled = fp16\n\n # imagen, unets and ema unets\n\n self.imagen = imagen\n self.num_unets = len(self.imagen.unets)\n\n self.use_ema = use_ema and self.is_main\n self.ema_unets = nn.ModuleList([])\n\n # keep track of what unet is being trained on\n # only going to allow 1 unet training at a time\n\n self.ema_unet_being_trained_index = -1 # keeps track of which ema unet is being trained on\n\n # data related functions\n\n self.train_dl_iter = None\n self.train_dl = None\n\n self.valid_dl_iter = None\n self.valid_dl = None\n\n self.dl_tuple_output_keywords_names = dl_tuple_output_keywords_names\n\n # auto splitting validation from training, if dataset is passed in\n\n self.split_valid_from_train = split_valid_from_train\n\n assert 0 <= split_valid_fraction <= 1, 'split valid fraction must be between 0 and 1'\n self.split_valid_fraction = split_valid_fraction\n self.split_random_seed = split_random_seed\n\n # be able to finely customize learning rate, weight decay\n # per unet\n\n lr, eps, warmup_steps, cosine_decay_max_steps = map(partial(cast_tuple, length = self.num_unets), (lr, eps, warmup_steps, cosine_decay_max_steps))\n\n for ind, (unet, unet_lr, unet_eps, unet_warmup_steps, unet_cosine_decay_max_steps) in enumerate(zip(self.imagen.unets, lr, eps, warmup_steps, cosine_decay_max_steps)):\n\n if use_lion:\n optimizer = Lion(\n unet.parameters(),\n lr = unet_lr,\n betas = (beta1, beta2),\n use_triton = True\n )\n else:\n optimizer = Adam(\n unet.parameters(),\n lr = unet_lr,\n eps = unet_eps,\n betas = (beta1, beta2),\n **kwargs\n )\n\n if self.use_ema:\n self.ema_unets.append(EMA(unet, **ema_kwargs))\n\n scaler = GradScaler(enabled = grad_scaler_enabled)\n\n scheduler = warmup_scheduler = None\n\n if exists(unet_cosine_decay_max_steps):\n scheduler = CosineAnnealingLR(optimizer, T_max = unet_cosine_decay_max_steps)\n\n if exists(unet_warmup_steps):\n warmup_scheduler = warmup.LinearWarmup(optimizer, warmup_period = unet_warmup_steps)\n\n if not exists(scheduler):\n scheduler = LambdaLR(optimizer, lr_lambda = lambda step: 1.0)\n\n # set on object\n\n setattr(self, f'optim{ind}', optimizer) # cannot use pytorch ModuleList for some reason with optimizers\n setattr(self, f'scaler{ind}', scaler)\n setattr(self, f'scheduler{ind}', scheduler)\n setattr(self, f'warmup{ind}', warmup_scheduler)\n\n # gradient clipping if needed\n\n self.max_grad_norm = max_grad_norm\n\n # step tracker and misc\n\n self.register_buffer('steps', torch.tensor([0] * self.num_unets))\n\n self.verbose = verbose\n\n # automatic set devices 
based on what accelerator decided\n\n self.imagen.to(self.device)\n self.to(self.device)\n\n # checkpointing\n\n assert not (exists(checkpoint_path) ^ exists(checkpoint_every))\n self.checkpoint_path = checkpoint_path\n self.checkpoint_every = checkpoint_every\n self.max_checkpoints_keep = max_checkpoints_keep\n\n self.can_checkpoint = self.is_local_main if isinstance(checkpoint_fs, LocalFileSystem) else self.is_main\n\n if exists(checkpoint_path) and self.can_checkpoint:\n bucket = url_to_bucket(checkpoint_path)\n\n if not self.fs.exists(bucket):\n self.fs.mkdir(bucket)\n\n self.load_from_checkpoint_folder()\n\n # only allowing training for unet\n\n self.only_train_unet_number = only_train_unet_number\n self.prepared = False\n\n\n def prepare(self):\n assert not self.prepared, f'The trainer is allready prepared'\n self.validate_and_set_unet_being_trained(self.only_train_unet_number)\n self.prepared = True\n # computed values\n\n @property\n def device(self):\n return self.accelerator.device\n\n @property\n def is_distributed(self):\n return not (self.accelerator.distributed_type == DistributedType.NO and self.accelerator.num_processes == 1)\n\n @property\n def is_main(self):\n return self.accelerator.is_main_process\n\n @property\n def is_local_main(self):\n return self.accelerator.is_local_main_process\n\n @property\n def unwrapped_unet(self):\n return self.accelerator.unwrap_model(self.unet_being_trained)\n\n # optimizer helper functions\n\n def get_lr(self, unet_number):\n self.validate_unet_number(unet_number)\n unet_index = unet_number - 1\n\n optim = getattr(self, f'optim{unet_index}')\n\n return optim.param_groups[0]['lr']\n\n # function for allowing only one unet from being trained at a time\n\n def validate_and_set_unet_being_trained(self, unet_number = None):\n if exists(unet_number):\n self.validate_unet_number(unet_number)\n\n assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, 'you cannot only train on one unet at a time. 
you will need to save the trainer into a checkpoint, and resume training on a new unet'\n\n self.only_train_unet_number = unet_number\n self.imagen.only_train_unet_number = unet_number\n\n if not exists(unet_number):\n return\n\n self.wrap_unet(unet_number)\n\n def wrap_unet(self, unet_number):\n if hasattr(self, 'one_unet_wrapped'):\n return\n\n unet = self.imagen.get_unet(unet_number)\n unet_index = unet_number - 1\n\n optimizer = getattr(self, f'optim{unet_index}')\n scheduler = getattr(self, f'scheduler{unet_index}')\n\n if self.train_dl:\n self.unet_being_trained, self.train_dl, optimizer = self.accelerator.prepare(unet, self.train_dl, optimizer)\n else:\n self.unet_being_trained, optimizer = self.accelerator.prepare(unet, optimizer)\n\n if exists(scheduler):\n scheduler = self.accelerator.prepare(scheduler)\n\n setattr(self, f'optim{unet_index}', optimizer)\n setattr(self, f'scheduler{unet_index}', scheduler)\n\n self.one_unet_wrapped = True\n\n # hacking accelerator due to not having separate gradscaler per optimizer\n\n def set_accelerator_scaler(self, unet_number):\n unet_number = self.validate_unet_number(unet_number)\n scaler = getattr(self, f'scaler{unet_number - 1}')\n\n self.accelerator.scaler = scaler\n for optimizer in self.accelerator._optimizers:\n optimizer.scaler = scaler\n\n # helper print\n\n def print(self, msg):\n if not self.is_main:\n return\n\n if not self.verbose:\n return\n\n return self.accelerator.print(msg)\n\n # validating the unet number\n\n def validate_unet_number(self, unet_number = None):\n if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n assert 0 < unet_number <= self.num_unets, f'unet number should be in between 1 and {self.num_unets}'\n return unet_number\n\n # number of training steps taken\n\n def num_steps_taken(self, unet_number = None):\n if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n return self.steps[unet_number - 1].item()\n\n def print_untrained_unets(self):\n print_final_error = False\n\n for ind, (steps, unet) in enumerate(zip(self.steps.tolist(), self.imagen.unets)):\n if steps > 0 or isinstance(unet, NullUnet):\n continue\n\n self.print(f'unet {ind + 1} has not been trained')\n print_final_error = True\n\n if print_final_error:\n self.print('when sampling, you can pass stop_at_unet_number to stop early in the cascade, so it does not try to generate with untrained unets')\n\n # data related functions\n\n def add_train_dataloader(self, dl = None):\n if not exists(dl):\n return\n\n assert not exists(self.train_dl), 'training dataloader was already added'\n assert not self.prepared, f'You need to add the dataset before preperation'\n self.train_dl = dl\n\n def add_valid_dataloader(self, dl):\n if not exists(dl):\n return\n\n assert not exists(self.valid_dl), 'validation dataloader was already added'\n assert not self.prepared, f'You need to add the dataset before preperation'\n self.valid_dl = dl\n\n def add_train_dataset(self, ds = None, *, batch_size, **dl_kwargs):\n if not exists(ds):\n return\n\n assert not exists(self.train_dl), 'training dataloader was already added'\n\n valid_ds = None\n if self.split_valid_from_train:\n train_size = int((1 - self.split_valid_fraction) * len(ds))\n valid_size = len(ds) - train_size\n\n ds, valid_ds = random_split(ds, [train_size, valid_size], generator = torch.Generator().manual_seed(self.split_random_seed))\n self.print(f'training with dataset of {len(ds)} samples and validating with randomly splitted {len(valid_ds)} samples')\n\n dl = DataLoader(ds, 
batch_size = batch_size, **dl_kwargs)\n self.add_train_dataloader(dl)\n\n if not self.split_valid_from_train:\n return\n\n self.add_valid_dataset(valid_ds, batch_size = batch_size, **dl_kwargs)\n\n def add_valid_dataset(self, ds, *, batch_size, **dl_kwargs):\n if not exists(ds):\n return\n\n assert not exists(self.valid_dl), 'validation dataloader was already added'\n\n dl = DataLoader(ds, batch_size = batch_size, **dl_kwargs)\n self.add_valid_dataloader(dl)\n\n def create_train_iter(self):\n assert exists(self.train_dl), 'training dataloader has not been registered with the trainer yet'\n\n if exists(self.train_dl_iter):\n return\n\n self.train_dl_iter = cycle(self.train_dl)\n\n def create_valid_iter(self):\n assert exists(self.valid_dl), 'validation dataloader has not been registered with the trainer yet'\n\n if exists(self.valid_dl_iter):\n return\n\n self.valid_dl_iter = cycle(self.valid_dl)\n\n def train_step(self, unet_number = None, **kwargs):\n if not self.prepared:\n self.prepare()\n self.create_train_iter()\n loss = self.step_with_dl_iter(self.train_dl_iter, unet_number = unet_number, **kwargs)\n self.update(unet_number = unet_number)\n return loss\n\n @torch.no_grad()\n @eval_decorator\n def valid_step(self, **kwargs):\n if not self.prepared:\n self.prepare()\n self.create_valid_iter()\n context = self.use_ema_unets if kwargs.pop('use_ema_unets', False) else nullcontext\n with context():\n loss = self.step_with_dl_iter(self.valid_dl_iter, **kwargs)\n return loss\n\n def step_with_dl_iter(self, dl_iter, **kwargs):", "metadata": {"task_id": "lucidrains--imagen-pytorch/64", "ground_truth": " dl_tuple_output = cast_tuple(next(dl_iter))\n model_input = dict(list(zip(self.dl_tuple_output_keywords_names, dl_tuple_output)))\n loss = self.forward(**{**kwargs, **model_input})\n return loss\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 250, "lineno": 627, "function_name": "step_with_dl_iter"}, "groundtruth": " dl_tuple_output = cast_tuple(next(dl_iter))\n model_input = dict(list(zip(self.dl_tuple_output_keywords_names, dl_tuple_output)))\n loss = self.forward(**{**kwargs, **model_input})\n return loss\n"} +{"prompt": " optim = getattr(self, f'optim{unet_index}')\n\n return optim.param_groups[0]['lr']\n\n # function for allowing only one unet from being trained at a time\n\n def validate_and_set_unet_being_trained(self, unet_number = None):\n if exists(unet_number):\n self.validate_unet_number(unet_number)\n\n assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, 'you cannot only train on one unet at a time. 
you will need to save the trainer into a checkpoint, and resume training on a new unet'\n\n self.only_train_unet_number = unet_number\n self.imagen.only_train_unet_number = unet_number\n\n if not exists(unet_number):\n return\n\n self.wrap_unet(unet_number)\n\n def wrap_unet(self, unet_number):\n if hasattr(self, 'one_unet_wrapped'):\n return\n\n unet = self.imagen.get_unet(unet_number)\n unet_index = unet_number - 1\n\n optimizer = getattr(self, f'optim{unet_index}')\n scheduler = getattr(self, f'scheduler{unet_index}')\n\n if self.train_dl:\n self.unet_being_trained, self.train_dl, optimizer = self.accelerator.prepare(unet, self.train_dl, optimizer)\n else:\n self.unet_being_trained, optimizer = self.accelerator.prepare(unet, optimizer)\n\n if exists(scheduler):\n scheduler = self.accelerator.prepare(scheduler)\n\n setattr(self, f'optim{unet_index}', optimizer)\n setattr(self, f'scheduler{unet_index}', scheduler)\n\n self.one_unet_wrapped = True\n\n # hacking accelerator due to not having separate gradscaler per optimizer\n\n def set_accelerator_scaler(self, unet_number):\n unet_number = self.validate_unet_number(unet_number)\n scaler = getattr(self, f'scaler{unet_number - 1}')\n\n self.accelerator.scaler = scaler\n for optimizer in self.accelerator._optimizers:\n optimizer.scaler = scaler\n\n # helper print\n\n def print(self, msg):\n if not self.is_main:\n return\n\n if not self.verbose:\n return\n\n return self.accelerator.print(msg)\n\n # validating the unet number\n\n def validate_unet_number(self, unet_number = None):\n if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n assert 0 < unet_number <= self.num_unets, f'unet number should be in between 1 and {self.num_unets}'\n return unet_number\n\n # number of training steps taken\n\n def num_steps_taken(self, unet_number = None):\n if self.num_unets == 1:\n unet_number = default(unet_number, 1)\n\n return self.steps[unet_number - 1].item()\n\n def print_untrained_unets(self):\n print_final_error = False\n\n for ind, (steps, unet) in enumerate(zip(self.steps.tolist(), self.imagen.unets)):\n if steps > 0 or isinstance(unet, NullUnet):\n continue\n\n self.print(f'unet {ind + 1} has not been trained')\n print_final_error = True\n\n if print_final_error:\n self.print('when sampling, you can pass stop_at_unet_number to stop early in the cascade, so it does not try to generate with untrained unets')\n\n # data related functions\n\n def add_train_dataloader(self, dl = None):\n if not exists(dl):\n return\n\n assert not exists(self.train_dl), 'training dataloader was already added'\n assert not self.prepared, f'You need to add the dataset before preperation'\n self.train_dl = dl\n\n def add_valid_dataloader(self, dl):\n if not exists(dl):\n return\n\n assert not exists(self.valid_dl), 'validation dataloader was already added'\n assert not self.prepared, f'You need to add the dataset before preperation'\n self.valid_dl = dl\n\n def add_train_dataset(self, ds = None, *, batch_size, **dl_kwargs):\n if not exists(ds):\n return\n\n assert not exists(self.train_dl), 'training dataloader was already added'\n\n valid_ds = None\n if self.split_valid_from_train:\n train_size = int((1 - self.split_valid_fraction) * len(ds))\n valid_size = len(ds) - train_size\n\n ds, valid_ds = random_split(ds, [train_size, valid_size], generator = torch.Generator().manual_seed(self.split_random_seed))\n self.print(f'training with dataset of {len(ds)} samples and validating with randomly splitted {len(valid_ds)} samples')\n\n dl = DataLoader(ds, 
batch_size = batch_size, **dl_kwargs)\n self.add_train_dataloader(dl)\n\n if not self.split_valid_from_train:\n return\n\n self.add_valid_dataset(valid_ds, batch_size = batch_size, **dl_kwargs)\n\n def add_valid_dataset(self, ds, *, batch_size, **dl_kwargs):\n if not exists(ds):\n return\n\n assert not exists(self.valid_dl), 'validation dataloader was already added'\n\n dl = DataLoader(ds, batch_size = batch_size, **dl_kwargs)\n self.add_valid_dataloader(dl)\n\n def create_train_iter(self):\n assert exists(self.train_dl), 'training dataloader has not been registered with the trainer yet'\n\n if exists(self.train_dl_iter):\n return\n\n self.train_dl_iter = cycle(self.train_dl)\n\n def create_valid_iter(self):\n assert exists(self.valid_dl), 'validation dataloader has not been registered with the trainer yet'\n\n if exists(self.valid_dl_iter):\n return\n\n self.valid_dl_iter = cycle(self.valid_dl)\n\n def train_step(self, unet_number = None, **kwargs):\n if not self.prepared:\n self.prepare()\n self.create_train_iter()\n loss = self.step_with_dl_iter(self.train_dl_iter, unet_number = unet_number, **kwargs)\n self.update(unet_number = unet_number)\n return loss\n\n @torch.no_grad()\n @eval_decorator\n def valid_step(self, **kwargs):\n if not self.prepared:\n self.prepare()\n self.create_valid_iter()\n context = self.use_ema_unets if kwargs.pop('use_ema_unets', False) else nullcontext\n with context():\n loss = self.step_with_dl_iter(self.valid_dl_iter, **kwargs)\n return loss\n\n def step_with_dl_iter(self, dl_iter, **kwargs):\n dl_tuple_output = cast_tuple(next(dl_iter))\n model_input = dict(list(zip(self.dl_tuple_output_keywords_names, dl_tuple_output)))\n loss = self.forward(**{**kwargs, **model_input})\n return loss\n\n # checkpointing functions\n\n @property\n def all_checkpoints_sorted(self):\n glob_pattern = os.path.join(self.checkpoint_path, '*.pt')\n checkpoints = self.fs.glob(glob_pattern)\n sorted_checkpoints = sorted(checkpoints, key = lambda x: int(str(x).split('.')[-2]), reverse = True)\n return sorted_checkpoints\n\n def load_from_checkpoint_folder(self, last_total_steps = -1):\n if last_total_steps != -1:\n filepath = os.path.join(self.checkpoint_path, f'checkpoint.{last_total_steps}.pt')\n self.load(filepath)\n return\n\n sorted_checkpoints = self.all_checkpoints_sorted\n\n if len(sorted_checkpoints) == 0:\n self.print(f'no checkpoints found to load from at {self.checkpoint_path}')\n return\n\n last_checkpoint = sorted_checkpoints[0]\n self.load(last_checkpoint)\n\n def save_to_checkpoint_folder(self):\n self.accelerator.wait_for_everyone()\n\n if not self.can_checkpoint:\n return\n\n total_steps = int(self.steps.sum().item())\n filepath = os.path.join(self.checkpoint_path, f'checkpoint.{total_steps}.pt')\n\n self.save(filepath)\n\n if self.max_checkpoints_keep <= 0:\n return\n\n sorted_checkpoints = self.all_checkpoints_sorted\n checkpoints_to_discard = sorted_checkpoints[self.max_checkpoints_keep:]\n\n for checkpoint in checkpoints_to_discard:\n self.fs.rm(checkpoint)\n\n # saving and loading functions\n\n def save(\n self,\n path,\n overwrite = True,\n without_optim_and_sched = False,\n **kwargs\n ):\n self.accelerator.wait_for_everyone()\n\n if not self.can_checkpoint:\n return\n\n fs = self.fs\n\n assert not (fs.exists(path) and not overwrite)\n\n self.reset_ema_unets_all_one_device()\n\n save_obj = dict(\n model = self.imagen.state_dict(),\n version = __version__,\n steps = self.steps.cpu(),\n **kwargs\n )\n\n save_optim_and_sched_iter = range(0, self.num_unets) if not 
without_optim_and_sched else tuple()\n\n for ind in save_optim_and_sched_iter:\n scaler_key = f'scaler{ind}'\n optimizer_key = f'optim{ind}'\n scheduler_key = f'scheduler{ind}'\n warmup_scheduler_key = f'warmup{ind}'\n\n scaler = getattr(self, scaler_key)\n optimizer = getattr(self, optimizer_key)\n scheduler = getattr(self, scheduler_key)\n warmup_scheduler = getattr(self, warmup_scheduler_key)\n\n if exists(scheduler):\n save_obj = {**save_obj, scheduler_key: scheduler.state_dict()}\n\n if exists(warmup_scheduler):\n save_obj = {**save_obj, warmup_scheduler_key: warmup_scheduler.state_dict()}\n\n save_obj = {**save_obj, scaler_key: scaler.state_dict(), optimizer_key: optimizer.state_dict()}\n\n if self.use_ema:\n save_obj = {**save_obj, 'ema': self.ema_unets.state_dict()}\n\n # determine if imagen config is available\n\n if hasattr(self.imagen, '_config'):\n self.print(f'this checkpoint is commandable from the CLI - \"imagen --model {str(path)} \\\"\\\"\"')\n\n save_obj = {\n **save_obj,\n 'imagen_type': 'elucidated' if self.is_elucidated else 'original',\n 'imagen_params': self.imagen._config\n }\n\n #save to path\n\n with fs.open(path, 'wb') as f:\n torch.save(save_obj, f)\n\n self.print(f'checkpoint saved to {path}')\n\n def load(self, path, only_model = False, strict = True, noop_if_not_exist = False):\n fs = self.fs\n\n if noop_if_not_exist and not fs.exists(path):\n self.print(f'trainer checkpoint not found at {str(path)}')\n return\n\n assert fs.exists(path), f'{path} does not exist'\n\n self.reset_ema_unets_all_one_device()\n\n # to avoid extra GPU memory usage in main process when using Accelerate\n\n with fs.open(path) as f:\n loaded_obj = torch.load(f, map_location='cpu')\n\n if version.parse(__version__) != version.parse(loaded_obj['version']):\n self.print(f'loading saved imagen at version {loaded_obj[\"version\"]}, but current package version is {__version__}')\n\n try:\n self.imagen.load_state_dict(loaded_obj['model'], strict = strict)\n except RuntimeError:\n print(\"Failed loading state dict. Trying partial load\")\n self.imagen.load_state_dict(restore_parts(self.imagen.state_dict(),\n loaded_obj['model']))\n\n if only_model:\n return loaded_obj\n\n self.steps.copy_(loaded_obj['steps'])\n\n for ind in range(0, self.num_unets):\n scaler_key = f'scaler{ind}'\n optimizer_key = f'optim{ind}'\n scheduler_key = f'scheduler{ind}'\n warmup_scheduler_key = f'warmup{ind}'\n\n scaler = getattr(self, scaler_key)\n optimizer = getattr(self, optimizer_key)\n scheduler = getattr(self, scheduler_key)\n warmup_scheduler = getattr(self, warmup_scheduler_key)\n\n if exists(scheduler) and scheduler_key in loaded_obj:\n scheduler.load_state_dict(loaded_obj[scheduler_key])\n\n if exists(warmup_scheduler) and warmup_scheduler_key in loaded_obj:\n warmup_scheduler.load_state_dict(loaded_obj[warmup_scheduler_key])\n\n if exists(optimizer):\n try:\n optimizer.load_state_dict(loaded_obj[optimizer_key])\n scaler.load_state_dict(loaded_obj[scaler_key])\n except:\n self.print('could not load optimizer and scaler, possibly because you have turned on mixed precision training since the last run. resuming with new optimizer and scalers')\n\n if self.use_ema:\n assert 'ema' in loaded_obj\n try:\n self.ema_unets.load_state_dict(loaded_obj['ema'], strict = strict)\n except RuntimeError:\n print(\"Failed loading state dict. 
Trying partial load\")\n self.ema_unets.load_state_dict(restore_parts(self.ema_unets.state_dict(),\n loaded_obj['ema']))\n\n self.print(f'checkpoint loaded from {path}')\n return loaded_obj\n\n # managing ema unets and their devices\n\n @property\n def unets(self):\n return nn.ModuleList([ema.ema_model for ema in self.ema_unets])\n\n def get_ema_unet(self, unet_number = None):", "metadata": {"task_id": "lucidrains--imagen-pytorch/65", "ground_truth": " if not self.use_ema:\n return\n\n unet_number = self.validate_unet_number(unet_number)\n index = unet_number - 1\n\n if isinstance(self.unets, nn.ModuleList):\n unets_list = [unet for unet in self.ema_unets]\n delattr(self, 'ema_unets')\n self.ema_unets = unets_list\n\n if index != self.ema_unet_being_trained_index:\n for unet_index, unet in enumerate(self.ema_unets):\n unet.to(self.device if unet_index == index else 'cpu')\n\n self.ema_unet_being_trained_index = index\n return self.ema_unets[index]\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 448, "lineno": 819, "function_name": "get_ema_unet"}, "groundtruth": " if not self.use_ema:\n return\n\n unet_number = self.validate_unet_number(unet_number)\n index = unet_number - 1\n\n if isinstance(self.unets, nn.ModuleList):\n unets_list = [unet for unet in self.ema_unets]\n delattr(self, 'ema_unets')\n self.ema_unets = unets_list\n\n if index != self.ema_unet_being_trained_index:\n for unet_index, unet in enumerate(self.ema_unets):\n unet.to(self.device if unet_index == index else 'cpu')\n\n self.ema_unet_being_trained_index = index\n return self.ema_unets[index]\n"} +{"prompt": "ation dataloader was already added'\n\n dl = DataLoader(ds, batch_size = batch_size, **dl_kwargs)\n self.add_valid_dataloader(dl)\n\n def create_train_iter(self):\n assert exists(self.train_dl), 'training dataloader has not been registered with the trainer yet'\n\n if exists(self.train_dl_iter):\n return\n\n self.train_dl_iter = cycle(self.train_dl)\n\n def create_valid_iter(self):\n assert exists(self.valid_dl), 'validation dataloader has not been registered with the trainer yet'\n\n if exists(self.valid_dl_iter):\n return\n\n self.valid_dl_iter = cycle(self.valid_dl)\n\n def train_step(self, unet_number = None, **kwargs):\n if not self.prepared:\n self.prepare()\n self.create_train_iter()\n loss = self.step_with_dl_iter(self.train_dl_iter, unet_number = unet_number, **kwargs)\n self.update(unet_number = unet_number)\n return loss\n\n @torch.no_grad()\n @eval_decorator\n def valid_step(self, **kwargs):\n if not self.prepared:\n self.prepare()\n self.create_valid_iter()\n context = self.use_ema_unets if kwargs.pop('use_ema_unets', False) else nullcontext\n with context():\n loss = self.step_with_dl_iter(self.valid_dl_iter, **kwargs)\n return loss\n\n def step_with_dl_iter(self, dl_iter, **kwargs):\n dl_tuple_output = cast_tuple(next(dl_iter))\n model_input = dict(list(zip(self.dl_tuple_output_keywords_names, dl_tuple_output)))\n loss = self.forward(**{**kwargs, **model_input})\n return loss\n\n # checkpointing functions\n\n @property\n def all_checkpoints_sorted(self):\n glob_pattern = os.path.join(self.checkpoint_path, '*.pt')\n checkpoints = self.fs.glob(glob_pattern)\n sorted_checkpoints = sorted(checkpoints, key = lambda x: int(str(x).split('.')[-2]), reverse = True)\n return sorted_checkpoints\n\n def load_from_checkpoint_folder(self, last_total_steps = -1):\n if last_total_steps != -1:\n filepath = os.path.join(self.checkpoint_path, 
f'checkpoint.{last_total_steps}.pt')\n self.load(filepath)\n return\n\n sorted_checkpoints = self.all_checkpoints_sorted\n\n if len(sorted_checkpoints) == 0:\n self.print(f'no checkpoints found to load from at {self.checkpoint_path}')\n return\n\n last_checkpoint = sorted_checkpoints[0]\n self.load(last_checkpoint)\n\n def save_to_checkpoint_folder(self):\n self.accelerator.wait_for_everyone()\n\n if not self.can_checkpoint:\n return\n\n total_steps = int(self.steps.sum().item())\n filepath = os.path.join(self.checkpoint_path, f'checkpoint.{total_steps}.pt')\n\n self.save(filepath)\n\n if self.max_checkpoints_keep <= 0:\n return\n\n sorted_checkpoints = self.all_checkpoints_sorted\n checkpoints_to_discard = sorted_checkpoints[self.max_checkpoints_keep:]\n\n for checkpoint in checkpoints_to_discard:\n self.fs.rm(checkpoint)\n\n # saving and loading functions\n\n def save(\n self,\n path,\n overwrite = True,\n without_optim_and_sched = False,\n **kwargs\n ):\n self.accelerator.wait_for_everyone()\n\n if not self.can_checkpoint:\n return\n\n fs = self.fs\n\n assert not (fs.exists(path) and not overwrite)\n\n self.reset_ema_unets_all_one_device()\n\n save_obj = dict(\n model = self.imagen.state_dict(),\n version = __version__,\n steps = self.steps.cpu(),\n **kwargs\n )\n\n save_optim_and_sched_iter = range(0, self.num_unets) if not without_optim_and_sched else tuple()\n\n for ind in save_optim_and_sched_iter:\n scaler_key = f'scaler{ind}'\n optimizer_key = f'optim{ind}'\n scheduler_key = f'scheduler{ind}'\n warmup_scheduler_key = f'warmup{ind}'\n\n scaler = getattr(self, scaler_key)\n optimizer = getattr(self, optimizer_key)\n scheduler = getattr(self, scheduler_key)\n warmup_scheduler = getattr(self, warmup_scheduler_key)\n\n if exists(scheduler):\n save_obj = {**save_obj, scheduler_key: scheduler.state_dict()}\n\n if exists(warmup_scheduler):\n save_obj = {**save_obj, warmup_scheduler_key: warmup_scheduler.state_dict()}\n\n save_obj = {**save_obj, scaler_key: scaler.state_dict(), optimizer_key: optimizer.state_dict()}\n\n if self.use_ema:\n save_obj = {**save_obj, 'ema': self.ema_unets.state_dict()}\n\n # determine if imagen config is available\n\n if hasattr(self.imagen, '_config'):\n self.print(f'this checkpoint is commandable from the CLI - \"imagen --model {str(path)} \\\"\\\"\"')\n\n save_obj = {\n **save_obj,\n 'imagen_type': 'elucidated' if self.is_elucidated else 'original',\n 'imagen_params': self.imagen._config\n }\n\n #save to path\n\n with fs.open(path, 'wb') as f:\n torch.save(save_obj, f)\n\n self.print(f'checkpoint saved to {path}')\n\n def load(self, path, only_model = False, strict = True, noop_if_not_exist = False):\n fs = self.fs\n\n if noop_if_not_exist and not fs.exists(path):\n self.print(f'trainer checkpoint not found at {str(path)}')\n return\n\n assert fs.exists(path), f'{path} does not exist'\n\n self.reset_ema_unets_all_one_device()\n\n # to avoid extra GPU memory usage in main process when using Accelerate\n\n with fs.open(path) as f:\n loaded_obj = torch.load(f, map_location='cpu')\n\n if version.parse(__version__) != version.parse(loaded_obj['version']):\n self.print(f'loading saved imagen at version {loaded_obj[\"version\"]}, but current package version is {__version__}')\n\n try:\n self.imagen.load_state_dict(loaded_obj['model'], strict = strict)\n except RuntimeError:\n print(\"Failed loading state dict. 
Trying partial load\")\n self.imagen.load_state_dict(restore_parts(self.imagen.state_dict(),\n loaded_obj['model']))\n\n if only_model:\n return loaded_obj\n\n self.steps.copy_(loaded_obj['steps'])\n\n for ind in range(0, self.num_unets):\n scaler_key = f'scaler{ind}'\n optimizer_key = f'optim{ind}'\n scheduler_key = f'scheduler{ind}'\n warmup_scheduler_key = f'warmup{ind}'\n\n scaler = getattr(self, scaler_key)\n optimizer = getattr(self, optimizer_key)\n scheduler = getattr(self, scheduler_key)\n warmup_scheduler = getattr(self, warmup_scheduler_key)\n\n if exists(scheduler) and scheduler_key in loaded_obj:\n scheduler.load_state_dict(loaded_obj[scheduler_key])\n\n if exists(warmup_scheduler) and warmup_scheduler_key in loaded_obj:\n warmup_scheduler.load_state_dict(loaded_obj[warmup_scheduler_key])\n\n if exists(optimizer):\n try:\n optimizer.load_state_dict(loaded_obj[optimizer_key])\n scaler.load_state_dict(loaded_obj[scaler_key])\n except:\n self.print('could not load optimizer and scaler, possibly because you have turned on mixed precision training since the last run. resuming with new optimizer and scalers')\n\n if self.use_ema:\n assert 'ema' in loaded_obj\n try:\n self.ema_unets.load_state_dict(loaded_obj['ema'], strict = strict)\n except RuntimeError:\n print(\"Failed loading state dict. Trying partial load\")\n self.ema_unets.load_state_dict(restore_parts(self.ema_unets.state_dict(),\n loaded_obj['ema']))\n\n self.print(f'checkpoint loaded from {path}')\n return loaded_obj\n\n # managing ema unets and their devices\n\n @property\n def unets(self):\n return nn.ModuleList([ema.ema_model for ema in self.ema_unets])\n\n def get_ema_unet(self, unet_number = None):\n if not self.use_ema:\n return\n\n unet_number = self.validate_unet_number(unet_number)\n index = unet_number - 1\n\n if isinstance(self.unets, nn.ModuleList):\n unets_list = [unet for unet in self.ema_unets]\n delattr(self, 'ema_unets')\n self.ema_unets = unets_list\n\n if index != self.ema_unet_being_trained_index:\n for unet_index, unet in enumerate(self.ema_unets):\n unet.to(self.device if unet_index == index else 'cpu')\n\n self.ema_unet_being_trained_index = index\n return self.ema_unets[index]\n\n def reset_ema_unets_all_one_device(self, device = None):\n if not self.use_ema:\n return\n\n device = default(device, self.device)\n self.ema_unets = nn.ModuleList([*self.ema_unets])\n self.ema_unets.to(device)\n\n self.ema_unet_being_trained_index = -1\n\n @torch.no_grad()\n @contextmanager\n def use_ema_unets(self):\n if not self.use_ema:\n output = yield\n return output\n\n self.reset_ema_unets_all_one_device()\n self.imagen.reset_unets_all_one_device()\n\n self.unets.eval()\n\n trainable_unets = self.imagen.unets\n self.imagen.unets = self.unets # swap in exponential moving averaged unets for sampling\n\n output = yield\n\n self.imagen.unets = trainable_unets # restore original training unets\n\n # cast the ema_model unets back to original device\n for ema in self.ema_unets:\n ema.restore_ema_model_device()\n\n return output\n\n def print_unet_devices(self):\n self.print('unet devices:')\n for i, unet in enumerate(self.imagen.unets):\n device = next(unet.parameters()).device\n self.print(f'\\tunet {i}: {device}')\n\n if not self.use_ema:\n return\n\n self.print('\\nema unet devices:')\n for i, ema_unet in enumerate(self.ema_unets):\n device = next(ema_unet.parameters()).device\n self.print(f'\\tema unet {i}: {device}')\n\n # overriding state dict functions\n\n def state_dict(self, *args, **kwargs):\n 
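Both `load` fallbacks above catch the `RuntimeError` from a strict `load_state_dict` and retry via `restore_parts`, which is defined elsewhere in the repo and not shown here. One common way such a partial-load helper is implemented is to copy only the entries whose key and shape match the live model; the sketch below is that generic pattern, not the repo's actual helper:

import torch
from torch import nn

def load_matching_parts(model, saved_state):
    """Copy only tensors whose key and shape match the current model.

    An assumed stand-in for a restore_parts-style fallback; details of the
    repo's own helper may differ.
    """
    current = model.state_dict()
    compatible = {
        k: v for k, v in saved_state.items()
        if k in current and current[k].shape == v.shape
    }
    current.update(compatible)
    model.load_state_dict(current)
    # Report what could not be restored so callers can log it.
    return set(saved_state) - set(compatible)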
self.reset_ema_unets_all_one_device()\n return super().state_dict(*args, **kwargs)\n\n def load_state_dict(self, *args, **kwargs):\n self.reset_ema_unets_all_one_device()\n return super().load_state_dict(*args, **kwargs)\n\n # encoding text functions\n\n def encode_text(self, text, **kwargs):\n return self.imagen.encode_text(text, **kwargs)\n\n # forwarding functions and gradient step updates\n\n def update(self, unet_number = None):\n unet_number = self.validate_unet_number(unet_number)\n self.validate_and_set_unet_being_trained(unet_number)\n self.set_accelerator_scaler(unet_number)\n\n index = unet_number - 1\n unet = self.unet_being_trained\n\n optimizer = getattr(self, f'optim{index}')\n scaler = getattr(self, f'scaler{index}')\n scheduler = getattr(self, f'scheduler{index}')\n warmup_scheduler = getattr(self, f'warmup{index}')\n\n # set the grad scaler on the accelerator, since we are managing one per u-net\n\n if exists(self.max_grad_norm):\n self.accelerator.clip_grad_norm_(unet.parameters(), self.max_grad_norm)\n\n optimizer.step()\n optimizer.zero_grad()\n\n if self.use_ema:\n ema_unet = self.get_ema_unet(unet_number)\n ema_unet.update()\n\n # scheduler, if needed\n\n maybe_warmup_context = nullcontext() if not exists(warmup_scheduler) else warmup_scheduler.dampening()\n\n with maybe_warmup_context:\n if exists(scheduler) and not self.accelerator.optimizer_step_was_skipped: # recommended in the docs\n scheduler.step()\n\n self.steps += F.one_hot(torch.tensor(unet_number - 1, device = self.steps.device), num_classes = len(self.steps))\n\n if not exists(self.checkpoint_path):\n return\n\n total_steps = int(self.steps.sum().item())\n\n if total_steps % self.checkpoint_every:\n return\n\n self.save_to_checkpoint_folder()\n\n @torch.no_grad()\n @cast_torch_tensor\n @imagen_sample_in_chunks\n def sample(self, *args, **kwargs):\n context = nullcontext if kwargs.pop('use_non_ema', False) else self.use_ema_unets\n\n self.print_untrained_unets()\n\n if not self.is_main:\n kwargs['use_tqdm'] = False\n\n with context():\n output = self.imagen.sample(*args, device = self.device, **kwargs)\n\n return output\n\n @partial(cast_torch_tensor, cast_fp16 = True)\n def forward(\n self,\n *args,\n unet_number = None,\n max_batch_size = None,\n **kwargs\n ):", "metadata": {"task_id": "lucidrains--imagen-pytorch/66", "ground_truth": " unet_number = self.validate_unet_number(unet_number)\n self.validate_and_set_unet_being_trained(unet_number)\n self.set_accelerator_scaler(unet_number)\n\n assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, f'you can only train unet #{self.only_train_unet_number}'\n\n total_loss = 0.\n\n for chunk_size_frac, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs):\n with self.accelerator.autocast():\n loss = self.imagen(*chunked_args, unet = self.unet_being_trained, unet_number = unet_number, **chunked_kwargs)\n loss = loss * chunk_size_frac\n\n total_loss += loss.item()\n\n if self.training:\n self.accelerator.backward(loss)\n\n return total_loss\n", "fpath_tuple": ["lucidrains_imagen-pytorch", "imagen_pytorch", "trainer.py"], "context_start_lineno": 586, "lineno": 972, "function_name": "forward"}, "groundtruth": " unet_number = self.validate_unet_number(unet_number)\n self.validate_and_set_unet_being_trained(unet_number)\n self.set_accelerator_scaler(unet_number)\n\n assert not exists(self.only_train_unet_number) or self.only_train_unet_number == unet_number, f'you can only train 
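Taken together, the trainer's `forward` and `update` implement gradient accumulation: `forward` splits the batch into chunks no larger than `max_batch_size`, scales each chunk's loss by its fraction of the batch before `backward`, and `update` then clips and steps. A framework-free sketch of that pattern, with no Accelerate and no per-unet bookkeeping, using a stand-in squared-error objective:

import torch
from torch import nn

def accumulate_and_step(model, batch, optimizer, chunk_size, max_grad_norm=None):
    model.train()
    optimizer.zero_grad()
    total = 0.0
    for chunk in batch.split(chunk_size):
        # Weight each chunk's mean loss by its share of the batch so the
        # summed gradients match a single full-batch backward pass.
        frac = chunk.shape[0] / batch.shape[0]
        loss = model(chunk).pow(2).mean() * frac  # stand-in objective
        loss.backward()
        total += loss.item()
    if max_grad_norm is not None:
        nn.utils.clip_grad_norm_(model.parameters(), max_grad_norm)
    optimizer.step()
    return total

model = nn.Linear(8, 1)
opt = torch.optim.SGD(model.parameters(), lr=1e-2)
accumulate_and_step(model, torch.randn(32, 8), opt, chunk_size=8, max_grad_norm=1.0)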
unet #{self.only_train_unet_number}'\n\n total_loss = 0.\n\n for chunk_size_frac, (chunked_args, chunked_kwargs) in split_args_and_kwargs(*args, split_size = max_batch_size, **kwargs):\n with self.accelerator.autocast():\n loss = self.imagen(*chunked_args, unet = self.unet_being_trained, unet_number = unet_number, **chunked_kwargs)\n loss = loss * chunk_size_frac\n\n total_loss += loss.item()\n\n if self.training:\n self.accelerator.backward(loss)\n\n return total_loss\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP Evaluator which applies causal masks to selectors.\"\"\"\n\nfrom typing import Sequence, Union\n\nimport numpy as np\nfrom tracr.rasp import rasp\n\n\nclass CausalEvaluator(rasp.DefaultRASPEvaluator):\n \"\"\"Evaluates RASP with causal masking.\"\"\"\n\n def evaluate(\n self, expr: rasp.RASPExpr, xs: Sequence[rasp.Value]\n ) -> Union[Sequence[rasp.Value], rasp.SelectorValue]:", "metadata": {"task_id": "deepmind--tracr/0", "ground_truth": " out = super().evaluate(expr, xs)\n\n if not isinstance(expr, rasp.Selector):\n return out\n\n out = np.array(out)\n causal_mask = np.tril(np.full(out.shape, 1))\n return np.logical_and(causal_mask, out).tolist()\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "causal_eval.py"], "context_start_lineno": 0, "lineno": 28, "function_name": "evaluate"}, "groundtruth": " out = super().evaluate(expr, xs)\n\n if not isinstance(expr, rasp.Selector):\n return out\n\n out = np.array(out)\n causal_mask = np.tril(np.full(out.shape, 1))\n return np.logical_and(causal_mask, out).tolist()\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. 
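The `CausalEvaluator` ground truth in the tracr record above reduces to a one-liner on the selector matrix: build a lower-triangular mask and AND it in, so no position can attend to a later one. Reproducing just that masking step with NumPy:

import numpy as np

def causal_mask(selector_value):
    """AND a selector matrix with a lower-triangular mask, as in the ground truth."""
    out = np.array(selector_value)
    mask = np.tril(np.full(out.shape, 1))
    return np.logical_and(mask, out).tolist()

# A selector that attends everywhere becomes strictly causal:
print(causal_mask([[True] * 3] * 3))
# [[True, False, False], [True, True, False], [True, True, True]]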
In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:", "metadata": {"task_id": "deepmind--tracr/1", "ground_truth": " if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. \"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 87, "function_name": "__getitem__"}, "groundtruth": " if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. \"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
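The `_Annotations.__getitem__` ground truth above is a lazy-defaults pattern: a read-only mapping that, on a key miss, consults a registry of annotator callables to compute and cache the value. A stripped-down sketch of the same idea; the `greeting` annotator is invented purely for illustration:

import collections.abc

REGISTRY = {}  # name -> callable(owner) producing the default value

class LazyDefaults(collections.abc.Mapping):
    def __init__(self, owner, **kwargs):
        self._owner = owner
        self._inner = dict(kwargs)

    def __getitem__(self, key):
        if key not in self._inner:
            if key not in REGISTRY:
                raise KeyError(f"No default registered for {key!r}")
            # Computed lazily on first access, then cached.
            self._inner[key] = REGISTRY[key](self._owner)
        return self._inner[key]

    def __iter__(self):
        return iter(self._inner)

    def __len__(self):
        return len(self._inner)

REGISTRY['greeting'] = lambda owner: f'hello from {owner}'
print(LazyDefaults('expr-1')['greeting'])  # -> 'hello from expr-1'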
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. 
\"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"", "metadata": {"task_id": "deepmind--tracr/2", "ground_truth": " new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 158, "function_name": "annotate"}, "groundtruth": " new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
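The `annotate` ground truth above never mutates: it shallow-copies the expression and rebuilds the annotation mapping with the new keys winning on conflict. The same functional-update idiom in isolation, with a generic `Node` standing in for a RASP expression:

import copy

class Node:
    def __init__(self, **annotations):
        self.annotations = dict(annotations)

def annotate(node, **annotations):
    # New keys overwrite existing ones with matching names; the original
    # node is left untouched because the dict is rebuilt, not updated.
    new = copy.copy(node)
    new.annotations = {**node.annotations, **annotations}
    return new

a = Node(name='tokens')
b = annotate(a, name='indices', encoding='numerical')
print(a.annotations)  # {'name': 'tokens'} -- unchanged
print(b.annotations)  # {'name': 'indices', 'encoding': 'numerical'}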
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. 
\"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"", "metadata": {"task_id": "deepmind--tracr/3", "ground_truth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n", "fpath_tuple": 
["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 203, "function_name": "__add__"}, "groundtruth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. 
\"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + 
self.\"\"\"", "metadata": {"task_id": "deepmind--tracr/4", "ground_truth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 209, "function_name": "__radd__"}, "groundtruth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. 
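The arithmetic dunders on `SOp` in the records above compute nothing at definition time; they build graph nodes, choosing a two-input node for SOp-with-SOp operands and a single-input node with the constant captured in a closure otherwise, while the `__r*__` twins preserve operand order when the SOp sits on the right. A toy graph-builder showing the same dispatch; `Expr`, `Input`, `MapConst`, and `ZipWith` are invented names, not tracr's:

class Expr:
    def __call__(self, xs):
        raise NotImplementedError

    def __add__(self, other):
        if isinstance(other, Expr):
            return ZipWith(lambda x, y: x + y, self, other)
        return MapConst(lambda x: x + other, self)

    def __radd__(self, other):
        # Python tries the left operand's __add__ first, so by the time
        # __radd__ runs, `other` is a plain value; keep it on the left.
        return MapConst(lambda x: other + x, self)

class Input(Expr):
    def __call__(self, xs):
        return list(xs)

class MapConst(Expr):
    def __init__(self, f, inner):
        self.f, self.inner = f, inner
    def __call__(self, xs):
        return [self.f(x) for x in self.inner(xs)]

class ZipWith(Expr):
    def __init__(self, f, left, right):
        self.f, self.left, self.right = f, left, right
    def __call__(self, xs):
        return [self.f(a, b) for a, b in zip(self.left(xs), self.right(xs))]

tokens = Input()
print((tokens + 10)([1, 2, 3]))      # [11, 12, 13]
print((tokens + tokens)([1, 2, 3]))  # [2, 4, 6]
print((10 + tokens)([1, 2, 3]))      # [11, 12, 13] via __radd__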
The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. \"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n 
\"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"", "metadata": {"task_id": "deepmind--tracr/5", "ground_truth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 215, "function_name": "__sub__"}, "groundtruth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. 
Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. 
\"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + 
self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"", "metadata": {"task_id": "deepmind--tracr/6", "ground_truth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 221, "function_name": "__rsub__"}, "groundtruth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. 
The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. \"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n 
\"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"", "metadata": {"task_id": "deepmind--tracr/7", "ground_truth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 227, "function_name": "__mul__"}, "groundtruth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. 
Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. 
\"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + 
self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"", "metadata": {"task_id": "deepmind--tracr/8", "ground_truth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 233, "function_name": "__rmul__"}, "groundtruth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. 
Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. 
\"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + 
self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n\n def __truediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"", "metadata": {"task_id": "deepmind--tracr/9", "ground_truth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 239, "function_name": "__truediv__"}, "groundtruth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. 
Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. 
\"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + 
self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n\n def __truediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n\n def __rtruediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other / self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, other, self)\n return Map(lambda x: other / x, self)\n\n def __invert__(self) -> \"SOp\":\n return Map(lambda x: not x, self)\n\n def __and__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self & other.\"\"\"", "metadata": {"task_id": "deepmind--tracr/10", "ground_truth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, self, other)\n return Map(lambda x: x and other, self)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 254, "function_name": "__and__"}, "groundtruth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, self, other)\n return Map(lambda x: x and other, self)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. 
Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. 
\"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + 
self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n\n def __truediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n\n def __rtruediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other / self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, other, self)\n return Map(lambda x: other / x, self)\n\n def __invert__(self) -> \"SOp\":\n return Map(lambda x: not x, self)\n\n def __and__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self & other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, self, other)\n return Map(lambda x: x and other, self)\n\n def __or__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self | other.\"\"\"", "metadata": {"task_id": "deepmind--tracr/11", "ground_truth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, self, other)\n return Map(lambda x: x or other, self)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 260, "function_name": "__or__"}, "groundtruth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, self, other)\n return Map(lambda x: x or other, self)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. 
In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. 
\"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + 
self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n\n def __truediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n\n def __rtruediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other / self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, other, self)\n return Map(lambda x: other / x, self)\n\n def __invert__(self) -> \"SOp\":\n return Map(lambda x: not x, self)\n\n def __and__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self & other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, self, other)\n return Map(lambda x: x and other, self)\n\n def __or__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self | other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, self, other)\n return Map(lambda x: x or other, self)\n\n def __rand__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other & self.\"\"\"", "metadata": {"task_id": "deepmind--tracr/12", "ground_truth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, other, self)\n return Map(lambda x: other and x, self)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 266, "function_name": "__rand__"}, "groundtruth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, other, self)\n return Map(lambda x: other and x, self)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. 
\"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + 
self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n\n def __truediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n\n def __rtruediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other / self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, other, self)\n return Map(lambda x: other / x, self)\n\n def __invert__(self) -> \"SOp\":\n return Map(lambda x: not x, self)\n\n def __and__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self & other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, self, other)\n return Map(lambda x: x and other, self)\n\n def __or__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self | other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, self, other)\n return Map(lambda x: x or other, self)\n\n def __rand__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other & self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, other, self)\n return Map(lambda x: other and x, self)\n\n def __ror__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other | self.\"\"\"", "metadata": {"task_id": "deepmind--tracr/13", "ground_truth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, other, self)\n return Map(lambda x: x or other, self)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 272, "function_name": "__ror__"}, "groundtruth": " if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, other, self)\n return Map(lambda x: x or other, self)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. 
\"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + 
self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n\n def __truediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n\n def __rtruediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other / self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, other, self)\n return Map(lambda x: other / x, self)\n\n def __invert__(self) -> \"SOp\":\n return Map(lambda x: not x, self)\n\n def __and__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self & other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, self, other)\n return Map(lambda x: x and other, self)\n\n def __or__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self | other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, self, other)\n return Map(lambda x: x or other, self)\n\n def __rand__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other & self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, other, self)\n return Map(lambda x: other and x, self)\n\n def __ror__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other | self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, other, self)\n return Map(lambda x: x or other, self)\n\n\nclass TokensType(SOp):\n \"\"\"Primitive SOp returning the original input tokens.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"tokens\"\n\n def __repr__(self):\n return \"tokens\"\n\n\nclass IndicesType(SOp):\n \"\"\"Primitive SOp returning the position index at each token.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"indices\"\n\n def __repr__(self):\n return \"indices\"\n\n\nclass LengthType(SOp):\n \"\"\"Primitive SOp returning the total length of the input.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"length\"\n\n def __repr__(self):\n return \"length\"\n\n\ntokens = TokensType()\nindices = IndicesType()\nlength = LengthType()\n\n\nclass Map(SOp):\n \"\"\"SOp that evaluates the function elementwise on the input SOp.\n\n Map(lambda x: x + 1, tokens).eval([1, 2, 3]) == [2, 3, 4]\n \"\"\"\n\n def __init__(self, f: Callable[[Value], Value], 
inner: SOp):", "metadata": {"task_id": "deepmind--tracr/14", "ground_truth": " super().__init__()\n self.f = f\n self.inner = inner\n\n assert isinstance(self.inner, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n if isinstance(self.inner, Map):\n # Combine the functions into just one.\n inner_f = self.inner.f\n self.f = lambda t: f(inner_f(t))\n self.inner = self.inner.inner\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 334, "function_name": "__init__"}, "groundtruth": " super().__init__()\n self.f = f\n self.inner = inner\n\n assert isinstance(self.inner, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n if isinstance(self.inner, Map):\n # Combine the functions into just one.\n inner_f = self.inner.f\n self.f = lambda t: f(inner_f(t))\n self.inner = self.inner.inner\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. 
The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. \"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n 
\"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n\n def __truediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n\n def __rtruediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other / self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, other, self)\n return Map(lambda x: other / x, self)\n\n def __invert__(self) -> \"SOp\":\n return Map(lambda x: not x, self)\n\n def __and__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self & other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, self, other)\n return Map(lambda x: x and other, self)\n\n def __or__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self | other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, self, other)\n return Map(lambda x: x or other, self)\n\n def __rand__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other & self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, other, self)\n return Map(lambda x: other and x, self)\n\n def __ror__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other | self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, other, self)\n return Map(lambda x: x or other, self)\n\n\nclass TokensType(SOp):\n \"\"\"Primitive SOp returning the original input tokens.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"tokens\"\n\n def __repr__(self):\n return 
\"tokens\"\n\n\nclass IndicesType(SOp):\n \"\"\"Primitive SOp returning the position index at each token.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"indices\"\n\n def __repr__(self):\n return \"indices\"\n\n\nclass LengthType(SOp):\n \"\"\"Primitive SOp returning the total length of the input.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"length\"\n\n def __repr__(self):\n return \"length\"\n\n\ntokens = TokensType()\nindices = IndicesType()\nlength = LengthType()\n\n\nclass Map(SOp):\n \"\"\"SOp that evaluates the function elementwise on the input SOp.\n\n Map(lambda x: x + 1, tokens).eval([1, 2, 3]) == [2, 3, 4]\n \"\"\"\n\n def __init__(self, f: Callable[[Value], Value], inner: SOp):\n super().__init__()\n self.f = f\n self.inner = inner\n\n assert isinstance(self.inner, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n if isinstance(self.inner, Map):\n # Combine the functions into just one.\n inner_f = self.inner.f\n self.f = lambda t: f(inner_f(t))\n self.inner = self.inner.inner\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\nclass SequenceMap(SOp):\n \"\"\"SOp that evaluates the function elementwise on the two given SOp's.\n\n SequenceMap(lambda x, y: x - y, length, tokens).eval([1, 2, 3]) == [2, 1, 0]\n \"\"\"\n\n def __init__(self, f: Callable[[Value, Value], Value], fst: SOp, snd: SOp):", "metadata": {"task_id": "deepmind--tracr/15", "ground_truth": " super().__init__()\n\n if fst == snd:\n logging.warning(\"Creating a SequenceMap with both inputs being the same \"\n \"SOp is discouraged. You should use a Map instead.\")\n\n self.f = f\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, SOp)\n assert isinstance(self.snd, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 359, "function_name": "__init__"}, "groundtruth": " super().__init__()\n\n if fst == snd:\n logging.warning(\"Creating a SequenceMap with both inputs being the same \"\n \"SOp is discouraged. You should use a Map instead.\")\n\n self.f = f\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, SOp)\n assert isinstance(self.snd, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. 
\"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + 
self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n\n def __truediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n\n def __rtruediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other / self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, other, self)\n return Map(lambda x: other / x, self)\n\n def __invert__(self) -> \"SOp\":\n return Map(lambda x: not x, self)\n\n def __and__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self & other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, self, other)\n return Map(lambda x: x and other, self)\n\n def __or__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self | other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, self, other)\n return Map(lambda x: x or other, self)\n\n def __rand__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other & self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, other, self)\n return Map(lambda x: other and x, self)\n\n def __ror__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other | self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, other, self)\n return Map(lambda x: x or other, self)\n\n\nclass TokensType(SOp):\n \"\"\"Primitive SOp returning the original input tokens.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"tokens\"\n\n def __repr__(self):\n return \"tokens\"\n\n\nclass IndicesType(SOp):\n \"\"\"Primitive SOp returning the position index at each token.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"indices\"\n\n def __repr__(self):\n return \"indices\"\n\n\nclass LengthType(SOp):\n \"\"\"Primitive SOp returning the total length of the input.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"length\"\n\n def __repr__(self):\n return \"length\"\n\n\ntokens = TokensType()\nindices = IndicesType()\nlength = LengthType()\n\n\nclass Map(SOp):\n \"\"\"SOp that evaluates the function elementwise on the input SOp.\n\n Map(lambda x: x + 1, tokens).eval([1, 2, 3]) == [2, 3, 4]\n \"\"\"\n\n def __init__(self, f: Callable[[Value], Value], 
inner: SOp):\n super().__init__()\n self.f = f\n self.inner = inner\n\n assert isinstance(self.inner, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n if isinstance(self.inner, Map):\n # Combine the functions into just one.\n inner_f = self.inner.f\n self.f = lambda t: f(inner_f(t))\n self.inner = self.inner.inner\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\nclass SequenceMap(SOp):\n \"\"\"SOp that evaluates the function elementwise on the two given SOp's.\n\n SequenceMap(lambda x, y: x - y, length, tokens).eval([1, 2, 3]) == [2, 1, 0]\n \"\"\"\n\n def __init__(self, f: Callable[[Value, Value], Value], fst: SOp, snd: SOp):\n super().__init__()\n\n if fst == snd:\n logging.warning(\"Creating a SequenceMap with both inputs being the same \"\n \"SOp is discouraged. You should use a Map instead.\")\n\n self.f = f\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, SOp)\n assert isinstance(self.snd, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass LinearSequenceMap(SequenceMap):\n \"\"\"SOp that evaluates a linear function elementwise on the two given SOp's.\"\"\"\n\n def __init__(self, fst: SOp, snd: SOp, fst_fac: float, snd_fac: float):", "metadata": {"task_id": "deepmind--tracr/16", "ground_truth": " super().__init__(fst=fst, snd=snd, f=lambda x, y: fst_fac * x + snd_fac * y)\n self.fst_fac = fst_fac\n self.snd_fac = snd_fac\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 381, "function_name": "__init__"}, "groundtruth": " super().__init__(fst=fst, snd=snd, f=lambda x, y: fst_fac * x + snd_fac * y)\n self.fst_fac = fst_fac\n self.snd_fac = snd_fac\n"} +{"prompt": " DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP program objects.\n\nEvery object in the RASP language is a function.\n\nThe most important type is S-Op, which is a function List[Value] -> List[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. 
Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. 
\"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + 
self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n\n def __truediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n\n def __rtruediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other / self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, other, self)\n return Map(lambda x: other / x, self)\n\n def __invert__(self) -> \"SOp\":\n return Map(lambda x: not x, self)\n\n def __and__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self & other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, self, other)\n return Map(lambda x: x and other, self)\n\n def __or__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self | other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, self, other)\n return Map(lambda x: x or other, self)\n\n def __rand__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other & self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, other, self)\n return Map(lambda x: other and x, self)\n\n def __ror__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other | self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, other, self)\n return Map(lambda x: x or other, self)\n\n\nclass TokensType(SOp):\n \"\"\"Primitive SOp returning the original input tokens.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"tokens\"\n\n def __repr__(self):\n return \"tokens\"\n\n\nclass IndicesType(SOp):\n \"\"\"Primitive SOp returning the position index at each token.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"indices\"\n\n def __repr__(self):\n return \"indices\"\n\n\nclass LengthType(SOp):\n \"\"\"Primitive SOp returning the total length of the input.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"length\"\n\n def __repr__(self):\n return \"length\"\n\n\ntokens = TokensType()\nindices = IndicesType()\nlength = LengthType()\n\n\nclass Map(SOp):\n \"\"\"SOp that evaluates the function elementwise on the input SOp.\n\n Map(lambda x: x + 1, tokens).eval([1, 2, 3]) == [2, 3, 4]\n \"\"\"\n\n def __init__(self, f: Callable[[Value], Value], 
inner: SOp):\n super().__init__()\n self.f = f\n self.inner = inner\n\n assert isinstance(self.inner, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n if isinstance(self.inner, Map):\n # Combine the functions into just one.\n inner_f = self.inner.f\n self.f = lambda t: f(inner_f(t))\n self.inner = self.inner.inner\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\nclass SequenceMap(SOp):\n \"\"\"SOp that evaluates the function elementwise on the two given SOp's.\n\n SequenceMap(lambda x, y: x - y, length, tokens).eval([1, 2, 3]) == [2, 1, 0]\n \"\"\"\n\n def __init__(self, f: Callable[[Value, Value], Value], fst: SOp, snd: SOp):\n super().__init__()\n\n if fst == snd:\n logging.warning(\"Creating a SequenceMap with both inputs being the same \"\n \"SOp is discouraged. You should use a Map instead.\")\n\n self.f = f\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, SOp)\n assert isinstance(self.snd, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass LinearSequenceMap(SequenceMap):\n \"\"\"SOp that evaluates a linear function elementwise on the two given SOp's.\"\"\"\n\n def __init__(self, fst: SOp, snd: SOp, fst_fac: float, snd_fac: float):\n super().__init__(fst=fst, snd=snd, f=lambda x, y: fst_fac * x + snd_fac * y)\n self.fst_fac = fst_fac\n self.snd_fac = snd_fac\n\n\nclass Full(SOp):\n \"\"\"A SOp evaluating to [fill]*len(input_values).\"\"\"\n\n def __init__(self, fill: Value):\n super().__init__()\n self.fill = fill\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\ndef sop_not(sop: SOp) -> SOp:\n return Map(lambda t: not t, sop)\n\n\nclass ConstantSOp(SOp, Generic[VT]):\n \"\"\"A constant S-Op for testing purposes.\"\"\"\n\n def __init__(self, value: Sequence[VT], check_length: bool = True):", "metadata": {"task_id": "deepmind--tracr/17", "ground_truth": " super().__init__()\n self.value = value\n self.check_length = check_length\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 0, "lineno": 406, "function_name": "__init__"}, "groundtruth": " super().__init__()\n self.value = value\n self.check_length = check_length\n"} +{"prompt": "[Value].\n\nAn S-Op represents a state inside the residual stream of the transformer.\nTherefore, any RASP program that represents a transformer computation must\ndefine a final S-Op that represents the state of the residual stream at the\nend of the computation. In particular, given an S-Op `x`,\n`x([1, 2, 3])` represents something like the state of the residual stream\nat location `x` when the transformer is fed [1, 2, 3] as input.\n\nA secondary (but still important) type is Selector, which is a function\nList[Value] -> List[List[bool]]. 
Given a Selector `sel`, sel([1, 2, 3])\nrepresents something like an attention matrix in the transformer.\n\nFor a full reference on RASP, see https://arxiv.org/abs/2106.06981.\n\"\"\"\n\nimport abc\nimport collections.abc\nimport copy\nimport enum\nimport functools\nimport itertools\nfrom typing import (Any, Callable, Dict, Generic, List, Mapping, Optional,\n Sequence, TypeVar, Union)\n\nfrom absl import logging\nimport numpy as np\nfrom typing_extensions import Protocol\n\nSelectorValue = List[List[bool]]\nNumericValue = Union[int, float]\nValue = Union[None, int, float, str, bool]\nVT = TypeVar(\"VT\", bound=Value)\nRASPExprT = TypeVar(\"RASPExprT\", bound=\"RASPExpr\")\nSOpT = TypeVar(\"SOpT\", bound=\"SOp\")\nT = TypeVar(\"T\")\n\n_NAME_KEY = \"name\"\n_ENCODING_KEY = \"encoding\"\n\n# These are run on every expression when it's initialised.\n# Add your own annotators to this dict to add custom default annotations.\n#\n# For example, DEFAULT_ANNOTATORS['foo'] will provide the default value for\n# expr.annotations['foo]. The annotator will get called lazily the first time\n# that key is accessed.\n#\n# See the `default_name` annotator for a full example.\nDEFAULT_ANNOTATORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. 
\"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + 
self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n\n def __truediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n\n def __rtruediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other / self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, other, self)\n return Map(lambda x: other / x, self)\n\n def __invert__(self) -> \"SOp\":\n return Map(lambda x: not x, self)\n\n def __and__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self & other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, self, other)\n return Map(lambda x: x and other, self)\n\n def __or__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self | other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, self, other)\n return Map(lambda x: x or other, self)\n\n def __rand__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other & self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, other, self)\n return Map(lambda x: other and x, self)\n\n def __ror__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other | self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, other, self)\n return Map(lambda x: x or other, self)\n\n\nclass TokensType(SOp):\n \"\"\"Primitive SOp returning the original input tokens.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"tokens\"\n\n def __repr__(self):\n return \"tokens\"\n\n\nclass IndicesType(SOp):\n \"\"\"Primitive SOp returning the position index at each token.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"indices\"\n\n def __repr__(self):\n return \"indices\"\n\n\nclass LengthType(SOp):\n \"\"\"Primitive SOp returning the total length of the input.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"length\"\n\n def __repr__(self):\n return \"length\"\n\n\ntokens = TokensType()\nindices = IndicesType()\nlength = LengthType()\n\n\nclass Map(SOp):\n \"\"\"SOp that evaluates the function elementwise on the input SOp.\n\n Map(lambda x: x + 1, tokens).eval([1, 2, 3]) == [2, 3, 4]\n \"\"\"\n\n def __init__(self, f: Callable[[Value], Value], 
inner: SOp):\n super().__init__()\n self.f = f\n self.inner = inner\n\n assert isinstance(self.inner, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n if isinstance(self.inner, Map):\n # Combine the functions into just one.\n inner_f = self.inner.f\n self.f = lambda t: f(inner_f(t))\n self.inner = self.inner.inner\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\nclass SequenceMap(SOp):\n \"\"\"SOp that evaluates the function elementwise on the two given SOp's.\n\n SequenceMap(lambda x, y: x - y, length, tokens).eval([1, 2, 3]) == [2, 1, 0]\n \"\"\"\n\n def __init__(self, f: Callable[[Value, Value], Value], fst: SOp, snd: SOp):\n super().__init__()\n\n if fst == snd:\n logging.warning(\"Creating a SequenceMap with both inputs being the same \"\n \"SOp is discouraged. You should use a Map instead.\")\n\n self.f = f\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, SOp)\n assert isinstance(self.snd, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass LinearSequenceMap(SequenceMap):\n \"\"\"SOp that evaluates a linear function elementwise on the two given SOp's.\"\"\"\n\n def __init__(self, fst: SOp, snd: SOp, fst_fac: float, snd_fac: float):\n super().__init__(fst=fst, snd=snd, f=lambda x, y: fst_fac * x + snd_fac * y)\n self.fst_fac = fst_fac\n self.snd_fac = snd_fac\n\n\nclass Full(SOp):\n \"\"\"A SOp evaluating to [fill]*len(input_values).\"\"\"\n\n def __init__(self, fill: Value):\n super().__init__()\n self.fill = fill\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\ndef sop_not(sop: SOp) -> SOp:\n return Map(lambda t: not t, sop)\n\n\nclass ConstantSOp(SOp, Generic[VT]):\n \"\"\"A constant S-Op for testing purposes.\"\"\"\n\n def __init__(self, value: Sequence[VT], check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\n### Selectors.\n\n\nclass Predicate(Protocol):\n\n def __call__(self, key: Value, query: Value) -> bool:\n \"\"\"Applies the predicate.\"\"\"\n\n\nclass Comparison(enum.Enum):\n \"\"\"A two-place boolean comparison predicate for use in Select.\"\"\"\n EQ = \"==\"\n LT = \"<\"\n LEQ = \"<=\"\n GT = \">\"\n GEQ = \">=\"\n NEQ = \"!=\"\n TRUE = \"True\"\n FALSE = \"False\"\n\n def __call__(self, key: Value, query: Value) -> bool:", "metadata": {"task_id": "deepmind--tracr/18", "ground_truth": " if key is None:\n raise ValueError(\"key is None!\")\n if query is None:\n raise ValueError(\"query is None!\")\n return _comparison_table[self](key, query)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 18, "lineno": 436, "function_name": "__call__"}, "groundtruth": " if key is None:\n raise ValueError(\"key is None!\")\n if query is None:\n raise ValueError(\"query is None!\")\n return _comparison_table[self](key, query)\n"} +{"prompt": "ORS: Dict[str, \"Annotator\"] = {}\n\n\nclass Annotator(Protocol):\n\n def __call__(self, expr: \"RASPExpr\") -> Any:\n \"\"\"What annotation to add to `expr`.\"\"\"\n\n\nclass _Annotations(collections.abc.Mapping):\n \"\"\"Holds the expression's annotations.\n\n It's immutable to the user, but will attempt to generate default values\n lazily when missing keys are requested.\n \"\"\"\n\n def __init__(self, expr, **kwargs: Any):\n self._expr = expr\n self._inner_dict: 
Dict[str, Any] = {**kwargs}\n\n def __getitem__(self, key: str) -> Any:\n if key not in self._inner_dict:\n if key not in DEFAULT_ANNOTATORS:\n raise KeyError(\n f\"No annotation exists for key '{key}'. \"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n 
if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n\n def __truediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n\n def __rtruediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other / self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, other, self)\n return Map(lambda x: other / x, self)\n\n def __invert__(self) -> \"SOp\":\n return Map(lambda x: not x, self)\n\n def __and__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self & other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, self, other)\n return Map(lambda x: x and other, self)\n\n def __or__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self | other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, self, other)\n return Map(lambda x: x or other, self)\n\n def __rand__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other & self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, other, self)\n return Map(lambda x: other and x, self)\n\n def __ror__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other | self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, other, self)\n return Map(lambda x: x or other, self)\n\n\nclass TokensType(SOp):\n \"\"\"Primitive SOp returning the original input tokens.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"tokens\"\n\n def __repr__(self):\n return \"tokens\"\n\n\nclass IndicesType(SOp):\n \"\"\"Primitive SOp returning the position index at each token.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"indices\"\n\n def __repr__(self):\n return \"indices\"\n\n\nclass LengthType(SOp):\n \"\"\"Primitive SOp returning the total length of the input.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"length\"\n\n def __repr__(self):\n return \"length\"\n\n\ntokens = TokensType()\nindices = IndicesType()\nlength = LengthType()\n\n\nclass 
Map(SOp):\n \"\"\"SOp that evaluates the function elementwise on the input SOp.\n\n Map(lambda x: x + 1, tokens).eval([1, 2, 3]) == [2, 3, 4]\n \"\"\"\n\n def __init__(self, f: Callable[[Value], Value], inner: SOp):\n super().__init__()\n self.f = f\n self.inner = inner\n\n assert isinstance(self.inner, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n if isinstance(self.inner, Map):\n # Combine the functions into just one.\n inner_f = self.inner.f\n self.f = lambda t: f(inner_f(t))\n self.inner = self.inner.inner\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\nclass SequenceMap(SOp):\n \"\"\"SOp that evaluates the function elementwise on the two given SOp's.\n\n SequenceMap(lambda x, y: x - y, length, tokens).eval([1, 2, 3]) == [2, 1, 0]\n \"\"\"\n\n def __init__(self, f: Callable[[Value, Value], Value], fst: SOp, snd: SOp):\n super().__init__()\n\n if fst == snd:\n logging.warning(\"Creating a SequenceMap with both inputs being the same \"\n \"SOp is discouraged. You should use a Map instead.\")\n\n self.f = f\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, SOp)\n assert isinstance(self.snd, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass LinearSequenceMap(SequenceMap):\n \"\"\"SOp that evaluates a linear function elementwise on the two given SOp's.\"\"\"\n\n def __init__(self, fst: SOp, snd: SOp, fst_fac: float, snd_fac: float):\n super().__init__(fst=fst, snd=snd, f=lambda x, y: fst_fac * x + snd_fac * y)\n self.fst_fac = fst_fac\n self.snd_fac = snd_fac\n\n\nclass Full(SOp):\n \"\"\"A SOp evaluating to [fill]*len(input_values).\"\"\"\n\n def __init__(self, fill: Value):\n super().__init__()\n self.fill = fill\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\ndef sop_not(sop: SOp) -> SOp:\n return Map(lambda t: not t, sop)\n\n\nclass ConstantSOp(SOp, Generic[VT]):\n \"\"\"A constant S-Op for testing purposes.\"\"\"\n\n def __init__(self, value: Sequence[VT], check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\n### Selectors.\n\n\nclass Predicate(Protocol):\n\n def __call__(self, key: Value, query: Value) -> bool:\n \"\"\"Applies the predicate.\"\"\"\n\n\nclass Comparison(enum.Enum):\n \"\"\"A two-place boolean comparison predicate for use in Select.\"\"\"\n EQ = \"==\"\n LT = \"<\"\n LEQ = \"<=\"\n GT = \">\"\n GEQ = \">=\"\n NEQ = \"!=\"\n TRUE = \"True\"\n FALSE = \"False\"\n\n def __call__(self, key: Value, query: Value) -> bool:\n if key is None:\n raise ValueError(\"key is None!\")\n if query is None:\n raise ValueError(\"query is None!\")\n return _comparison_table[self](key, query)\n\n\n_comparison_table = {\n Comparison.EQ: lambda key, query: key == query,\n Comparison.LT: lambda key, query: key < query,\n Comparison.LEQ: lambda key, query: key <= query,\n Comparison.GT: lambda key, query: key > query,\n Comparison.GEQ: lambda key, query: key >= query,\n Comparison.NEQ: lambda key, query: key != query,\n Comparison.TRUE: lambda key, query: True,\n Comparison.FALSE: lambda key, query: False,\n}\n\n\nclass Selector(RASPExpr):\n \"\"\"RASP Selector. 
Represents something like an attention head's weights.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> SelectorValue:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of Selector combinations using Python logical operators.\n def __and__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self & other.\"\"\"\n return selector_and(self, other)\n\n def __rand__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other & self.\"\"\"\n return selector_and(other, self)\n\n def __or__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self | other.\"\"\"\n return selector_or(self, other)\n\n def __ror__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other | self.\"\"\"\n return selector_or(other, self)\n\n def __invert__(self) -> \"Selector\":\n \"\"\"~self.\"\"\"\n return selector_not(self)\n\n\nclass Select(Selector):\n \"\"\"Primitive that creates a Selector.\"\"\"\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):", "metadata": {"task_id": "deepmind--tracr/19", "ground_truth": " super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 66, "lineno": 487, "function_name": "__init__"}, "groundtruth": " super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n"} +{"prompt": "}'. \"\n f\"Available keys: {list(*self.keys(), *DEFAULT_ANNOTATORS.keys())}\")\n self._inner_dict[key] = DEFAULT_ANNOTATORS[key](self._expr)\n\n return self._inner_dict[key]\n\n def __iter__(self):\n return iter(self._inner_dict)\n\n def __len__(self):\n return len(self._inner_dict)\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added 
annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n\n def __truediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n\n def __rtruediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other / self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, other, self)\n return Map(lambda x: other / x, self)\n\n def __invert__(self) -> \"SOp\":\n return Map(lambda x: not x, self)\n\n def __and__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self & other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, self, other)\n return Map(lambda x: x and other, self)\n\n def __or__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self | other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, self, other)\n return Map(lambda 
x: x or other, self)\n\n def __rand__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other & self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, other, self)\n return Map(lambda x: other and x, self)\n\n def __ror__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other | self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, other, self)\n return Map(lambda x: x or other, self)\n\n\nclass TokensType(SOp):\n \"\"\"Primitive SOp returning the original input tokens.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"tokens\"\n\n def __repr__(self):\n return \"tokens\"\n\n\nclass IndicesType(SOp):\n \"\"\"Primitive SOp returning the position index at each token.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"indices\"\n\n def __repr__(self):\n return \"indices\"\n\n\nclass LengthType(SOp):\n \"\"\"Primitive SOp returning the total length of the input.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"length\"\n\n def __repr__(self):\n return \"length\"\n\n\ntokens = TokensType()\nindices = IndicesType()\nlength = LengthType()\n\n\nclass Map(SOp):\n \"\"\"SOp that evaluates the function elementwise on the input SOp.\n\n Map(lambda x: x + 1, tokens).eval([1, 2, 3]) == [2, 3, 4]\n \"\"\"\n\n def __init__(self, f: Callable[[Value], Value], inner: SOp):\n super().__init__()\n self.f = f\n self.inner = inner\n\n assert isinstance(self.inner, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n if isinstance(self.inner, Map):\n # Combine the functions into just one.\n inner_f = self.inner.f\n self.f = lambda t: f(inner_f(t))\n self.inner = self.inner.inner\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\nclass SequenceMap(SOp):\n \"\"\"SOp that evaluates the function elementwise on the two given SOp's.\n\n SequenceMap(lambda x, y: x - y, length, tokens).eval([1, 2, 3]) == [2, 1, 0]\n \"\"\"\n\n def __init__(self, f: Callable[[Value, Value], Value], fst: SOp, snd: SOp):\n super().__init__()\n\n if fst == snd:\n logging.warning(\"Creating a SequenceMap with both inputs being the same \"\n \"SOp is discouraged. 
You should use a Map instead.\")\n\n self.f = f\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, SOp)\n assert isinstance(self.snd, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass LinearSequenceMap(SequenceMap):\n \"\"\"SOp that evaluates a linear function elementwise on the two given SOp's.\"\"\"\n\n def __init__(self, fst: SOp, snd: SOp, fst_fac: float, snd_fac: float):\n super().__init__(fst=fst, snd=snd, f=lambda x, y: fst_fac * x + snd_fac * y)\n self.fst_fac = fst_fac\n self.snd_fac = snd_fac\n\n\nclass Full(SOp):\n \"\"\"A SOp evaluating to [fill]*len(input_values).\"\"\"\n\n def __init__(self, fill: Value):\n super().__init__()\n self.fill = fill\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\ndef sop_not(sop: SOp) -> SOp:\n return Map(lambda t: not t, sop)\n\n\nclass ConstantSOp(SOp, Generic[VT]):\n \"\"\"A constant S-Op for testing purposes.\"\"\"\n\n def __init__(self, value: Sequence[VT], check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\n### Selectors.\n\n\nclass Predicate(Protocol):\n\n def __call__(self, key: Value, query: Value) -> bool:\n \"\"\"Applies the predicate.\"\"\"\n\n\nclass Comparison(enum.Enum):\n \"\"\"A two-place boolean comparison predicate for use in Select.\"\"\"\n EQ = \"==\"\n LT = \"<\"\n LEQ = \"<=\"\n GT = \">\"\n GEQ = \">=\"\n NEQ = \"!=\"\n TRUE = \"True\"\n FALSE = \"False\"\n\n def __call__(self, key: Value, query: Value) -> bool:\n if key is None:\n raise ValueError(\"key is None!\")\n if query is None:\n raise ValueError(\"query is None!\")\n return _comparison_table[self](key, query)\n\n\n_comparison_table = {\n Comparison.EQ: lambda key, query: key == query,\n Comparison.LT: lambda key, query: key < query,\n Comparison.LEQ: lambda key, query: key <= query,\n Comparison.GT: lambda key, query: key > query,\n Comparison.GEQ: lambda key, query: key >= query,\n Comparison.NEQ: lambda key, query: key != query,\n Comparison.TRUE: lambda key, query: True,\n Comparison.FALSE: lambda key, query: False,\n}\n\n\nclass Selector(RASPExpr):\n \"\"\"RASP Selector. 
Represents something like an attention head's weights.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> SelectorValue:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of Selector combinations using Python logical operators.\n def __and__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self & other.\"\"\"\n return selector_and(self, other)\n\n def __rand__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other & self.\"\"\"\n return selector_and(other, self)\n\n def __or__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self | other.\"\"\"\n return selector_or(self, other)\n\n def __ror__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other | self.\"\"\"\n return selector_or(other, self)\n\n def __invert__(self) -> \"Selector\":\n \"\"\"~self.\"\"\"\n return selector_not(self)\n\n\nclass Select(Selector):\n \"\"\"Primitive that creates a Selector.\"\"\"\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):\n super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.keys, self.queries]\n\n\nclass ConstantSelector(Selector):\n \"\"\"A constant selector for testing purposes.\"\"\"\n\n def __init__(self, value: SelectorValue, check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\nclass SelectorWidth(SOp):\n \"\"\"SelectorWidth primitive.\"\"\"\n\n def __init__(self, selector: Selector):", "metadata": {"task_id": "deepmind--tracr/20", "ground_truth": " super().__init__()\n self.selector = selector\n assert isinstance(self.selector, Selector)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 90, "lineno": 516, "function_name": "__init__"}, "groundtruth": " super().__init__()\n self.selector = selector\n assert isinstance(self.selector, Selector)\n"} +{"prompt": "\n\n\nclass RASPExpr(abc.ABC):\n \"\"\"A class distinguishing RASP expressions from other objects.\"\"\"\n _ids = itertools.count(1)\n\n def __init__(self):\n self._annotations: Mapping[str, Any] = _Annotations(self)\n\n @abc.abstractmethod\n def __call__(self,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASPExpr using the standard evaluator.\"\"\"\n\n @property\n def annotations(self) -> Mapping[str, Any]:\n \"\"\"The annotations of this expression instance.\"\"\"\n return self._annotations\n\n @annotations.setter\n def annotations(self, annotations: Mapping[str, Any]):\n self._annotations = _Annotations(self, **annotations)\n\n @property\n def name(self) -> str:\n \"\"\"The name of this expression.\"\"\"\n return self.annotations[_NAME_KEY]\n\n @property\n @abc.abstractmethod\n def children(self) -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def 
annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n\n def __truediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n\n def __rtruediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other / self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, other, self)\n return Map(lambda x: other / x, self)\n\n def __invert__(self) -> \"SOp\":\n return Map(lambda x: not x, self)\n\n def __and__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self & other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, 
self, other)\n return Map(lambda x: x and other, self)\n\n def __or__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self | other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, self, other)\n return Map(lambda x: x or other, self)\n\n def __rand__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other & self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, other, self)\n return Map(lambda x: other and x, self)\n\n def __ror__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other | self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, other, self)\n return Map(lambda x: x or other, self)\n\n\nclass TokensType(SOp):\n \"\"\"Primitive SOp returning the original input tokens.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"tokens\"\n\n def __repr__(self):\n return \"tokens\"\n\n\nclass IndicesType(SOp):\n \"\"\"Primitive SOp returning the position index at each token.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"indices\"\n\n def __repr__(self):\n return \"indices\"\n\n\nclass LengthType(SOp):\n \"\"\"Primitive SOp returning the total length of the input.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"length\"\n\n def __repr__(self):\n return \"length\"\n\n\ntokens = TokensType()\nindices = IndicesType()\nlength = LengthType()\n\n\nclass Map(SOp):\n \"\"\"SOp that evaluates the function elementwise on the input SOp.\n\n Map(lambda x: x + 1, tokens).eval([1, 2, 3]) == [2, 3, 4]\n \"\"\"\n\n def __init__(self, f: Callable[[Value], Value], inner: SOp):\n super().__init__()\n self.f = f\n self.inner = inner\n\n assert isinstance(self.inner, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n if isinstance(self.inner, Map):\n # Combine the functions into just one.\n inner_f = self.inner.f\n self.f = lambda t: f(inner_f(t))\n self.inner = self.inner.inner\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\nclass SequenceMap(SOp):\n \"\"\"SOp that evaluates the function elementwise on the two given SOp's.\n\n SequenceMap(lambda x, y: x - y, length, tokens).eval([1, 2, 3]) == [2, 1, 0]\n \"\"\"\n\n def __init__(self, f: Callable[[Value, Value], Value], fst: SOp, snd: SOp):\n super().__init__()\n\n if fst == snd:\n logging.warning(\"Creating a SequenceMap with both inputs being the same \"\n \"SOp is discouraged. 
You should use a Map instead.\")\n\n self.f = f\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, SOp)\n assert isinstance(self.snd, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass LinearSequenceMap(SequenceMap):\n \"\"\"SOp that evaluates a linear function elementwise on the two given SOp's.\"\"\"\n\n def __init__(self, fst: SOp, snd: SOp, fst_fac: float, snd_fac: float):\n super().__init__(fst=fst, snd=snd, f=lambda x, y: fst_fac * x + snd_fac * y)\n self.fst_fac = fst_fac\n self.snd_fac = snd_fac\n\n\nclass Full(SOp):\n \"\"\"A SOp evaluating to [fill]*len(input_values).\"\"\"\n\n def __init__(self, fill: Value):\n super().__init__()\n self.fill = fill\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\ndef sop_not(sop: SOp) -> SOp:\n return Map(lambda t: not t, sop)\n\n\nclass ConstantSOp(SOp, Generic[VT]):\n \"\"\"A constant S-Op for testing purposes.\"\"\"\n\n def __init__(self, value: Sequence[VT], check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\n### Selectors.\n\n\nclass Predicate(Protocol):\n\n def __call__(self, key: Value, query: Value) -> bool:\n \"\"\"Applies the predicate.\"\"\"\n\n\nclass Comparison(enum.Enum):\n \"\"\"A two-place boolean comparison predicate for use in Select.\"\"\"\n EQ = \"==\"\n LT = \"<\"\n LEQ = \"<=\"\n GT = \">\"\n GEQ = \">=\"\n NEQ = \"!=\"\n TRUE = \"True\"\n FALSE = \"False\"\n\n def __call__(self, key: Value, query: Value) -> bool:\n if key is None:\n raise ValueError(\"key is None!\")\n if query is None:\n raise ValueError(\"query is None!\")\n return _comparison_table[self](key, query)\n\n\n_comparison_table = {\n Comparison.EQ: lambda key, query: key == query,\n Comparison.LT: lambda key, query: key < query,\n Comparison.LEQ: lambda key, query: key <= query,\n Comparison.GT: lambda key, query: key > query,\n Comparison.GEQ: lambda key, query: key >= query,\n Comparison.NEQ: lambda key, query: key != query,\n Comparison.TRUE: lambda key, query: True,\n Comparison.FALSE: lambda key, query: False,\n}\n\n\nclass Selector(RASPExpr):\n \"\"\"RASP Selector. 
Represents something like an attention head's weights.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> SelectorValue:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of Selector combinations using Python logical operators.\n def __and__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self & other.\"\"\"\n return selector_and(self, other)\n\n def __rand__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other & self.\"\"\"\n return selector_and(other, self)\n\n def __or__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self | other.\"\"\"\n return selector_or(self, other)\n\n def __ror__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other | self.\"\"\"\n return selector_or(other, self)\n\n def __invert__(self) -> \"Selector\":\n \"\"\"~self.\"\"\"\n return selector_not(self)\n\n\nclass Select(Selector):\n \"\"\"Primitive that creates a Selector.\"\"\"\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):\n super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.keys, self.queries]\n\n\nclass ConstantSelector(Selector):\n \"\"\"A constant selector for testing purposes.\"\"\"\n\n def __init__(self, value: SelectorValue, check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\nclass SelectorWidth(SOp):\n \"\"\"SelectorWidth primitive.\"\"\"\n\n def __init__(self, selector: Selector):\n super().__init__()\n self.selector = selector\n assert isinstance(self.selector, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector]\n\n\nclass SelectorAnd(Selector):\n \"\"\"Implements elementwise `and` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):", "metadata": {"task_id": "deepmind--tracr/21", "ground_truth": " super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 100, "lineno": 529, "function_name": "__init__"}, "groundtruth": " super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n"} +{"prompt": " -> Sequence[\"RASPExpr\"]:\n \"\"\"Direct dependencies of this expression.\"\"\"\n\n @functools.cached_property\n def unique_id(self):\n \"\"\"A unique id for every expression instance.\"\"\"\n return next(self._ids)\n\n def copy(self: RASPExprT) -> RASPExprT:\n \"\"\"Returns a shallow copy of this RASPExpr with a new ID.\"\"\"\n return copy.copy(self)\n\n @property\n def label(self) -> str:\n return f\"{self.name}_{self.unique_id}\"\n\n def named(self: RASPExprT, name: str) -> RASPExprT:\n \"\"\"Convenience method for adding a name.\"\"\"\n return annotate(self, name=name)\n\n def annotated(self: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return 
new\n\n\n### S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n\n def __truediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n\n def __rtruediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other / self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, other, self)\n return Map(lambda x: other / x, self)\n\n def __invert__(self) -> \"SOp\":\n return Map(lambda x: not x, self)\n\n def __and__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self & other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, self, other)\n return Map(lambda x: x and other, self)\n\n def __or__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self | other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, self, other)\n return Map(lambda x: x or other, self)\n\n def __rand__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other & self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, 
y: x and y, other, self)\n return Map(lambda x: other and x, self)\n\n def __ror__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other | self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, other, self)\n return Map(lambda x: x or other, self)\n\n\nclass TokensType(SOp):\n \"\"\"Primitive SOp returning the original input tokens.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"tokens\"\n\n def __repr__(self):\n return \"tokens\"\n\n\nclass IndicesType(SOp):\n \"\"\"Primitive SOp returning the position index at each token.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"indices\"\n\n def __repr__(self):\n return \"indices\"\n\n\nclass LengthType(SOp):\n \"\"\"Primitive SOp returning the total length of the input.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"length\"\n\n def __repr__(self):\n return \"length\"\n\n\ntokens = TokensType()\nindices = IndicesType()\nlength = LengthType()\n\n\nclass Map(SOp):\n \"\"\"SOp that evaluates the function elementwise on the input SOp.\n\n Map(lambda x: x + 1, tokens).eval([1, 2, 3]) == [2, 3, 4]\n \"\"\"\n\n def __init__(self, f: Callable[[Value], Value], inner: SOp):\n super().__init__()\n self.f = f\n self.inner = inner\n\n assert isinstance(self.inner, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n if isinstance(self.inner, Map):\n # Combine the functions into just one.\n inner_f = self.inner.f\n self.f = lambda t: f(inner_f(t))\n self.inner = self.inner.inner\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\nclass SequenceMap(SOp):\n \"\"\"SOp that evaluates the function elementwise on the two given SOp's.\n\n SequenceMap(lambda x, y: x - y, length, tokens).eval([1, 2, 3]) == [2, 1, 0]\n \"\"\"\n\n def __init__(self, f: Callable[[Value, Value], Value], fst: SOp, snd: SOp):\n super().__init__()\n\n if fst == snd:\n logging.warning(\"Creating a SequenceMap with both inputs being the same \"\n \"SOp is discouraged. 
You should use a Map instead.\")\n\n self.f = f\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, SOp)\n assert isinstance(self.snd, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass LinearSequenceMap(SequenceMap):\n \"\"\"SOp that evaluates a linear function elementwise on the two given SOp's.\"\"\"\n\n def __init__(self, fst: SOp, snd: SOp, fst_fac: float, snd_fac: float):\n super().__init__(fst=fst, snd=snd, f=lambda x, y: fst_fac * x + snd_fac * y)\n self.fst_fac = fst_fac\n self.snd_fac = snd_fac\n\n\nclass Full(SOp):\n \"\"\"A SOp evaluating to [fill]*len(input_values).\"\"\"\n\n def __init__(self, fill: Value):\n super().__init__()\n self.fill = fill\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\ndef sop_not(sop: SOp) -> SOp:\n return Map(lambda t: not t, sop)\n\n\nclass ConstantSOp(SOp, Generic[VT]):\n \"\"\"A constant S-Op for testing purposes.\"\"\"\n\n def __init__(self, value: Sequence[VT], check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\n### Selectors.\n\n\nclass Predicate(Protocol):\n\n def __call__(self, key: Value, query: Value) -> bool:\n \"\"\"Applies the predicate.\"\"\"\n\n\nclass Comparison(enum.Enum):\n \"\"\"A two-place boolean comparison predicate for use in Select.\"\"\"\n EQ = \"==\"\n LT = \"<\"\n LEQ = \"<=\"\n GT = \">\"\n GEQ = \">=\"\n NEQ = \"!=\"\n TRUE = \"True\"\n FALSE = \"False\"\n\n def __call__(self, key: Value, query: Value) -> bool:\n if key is None:\n raise ValueError(\"key is None!\")\n if query is None:\n raise ValueError(\"query is None!\")\n return _comparison_table[self](key, query)\n\n\n_comparison_table = {\n Comparison.EQ: lambda key, query: key == query,\n Comparison.LT: lambda key, query: key < query,\n Comparison.LEQ: lambda key, query: key <= query,\n Comparison.GT: lambda key, query: key > query,\n Comparison.GEQ: lambda key, query: key >= query,\n Comparison.NEQ: lambda key, query: key != query,\n Comparison.TRUE: lambda key, query: True,\n Comparison.FALSE: lambda key, query: False,\n}\n\n\nclass Selector(RASPExpr):\n \"\"\"RASP Selector. 
Represents something like an attention head's weights.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> SelectorValue:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of Selector combinations using Python logical operators.\n def __and__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self & other.\"\"\"\n return selector_and(self, other)\n\n def __rand__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other & self.\"\"\"\n return selector_and(other, self)\n\n def __or__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self | other.\"\"\"\n return selector_or(self, other)\n\n def __ror__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other | self.\"\"\"\n return selector_or(other, self)\n\n def __invert__(self) -> \"Selector\":\n \"\"\"~self.\"\"\"\n return selector_not(self)\n\n\nclass Select(Selector):\n \"\"\"Primitive that creates a Selector.\"\"\"\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):\n super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.keys, self.queries]\n\n\nclass ConstantSelector(Selector):\n \"\"\"A constant selector for testing purposes.\"\"\"\n\n def __init__(self, value: SelectorValue, check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\nclass SelectorWidth(SOp):\n \"\"\"SelectorWidth primitive.\"\"\"\n\n def __init__(self, selector: Selector):\n super().__init__()\n self.selector = selector\n assert isinstance(self.selector, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector]\n\n\nclass SelectorAnd(Selector):\n \"\"\"Implements elementwise `and` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorOr(Selector):\n \"\"\"Implements elementwise `or` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorNot(Selector):\n \"\"\"Implements elementwise `not` on a selector.\"\"\"\n\n def __init__(self, inner: Selector):", "metadata": {"task_id": "deepmind--tracr/22", "ground_truth": " self.inner = inner\n super().__init__()\n assert isinstance(self.inner, Selector)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 131, "lineno": 559, "function_name": "__init__"}, "groundtruth": " self.inner = inner\n super().__init__()\n assert isinstance(self.inner, Selector)\n"} +{"prompt": "prT:\n \"\"\"Convenience method for adding annotations.\"\"\"\n return annotate(self, **annotations)\n\n\ndef annotate(expr: RASPExprT, **annotations) -> RASPExprT:\n \"\"\"Creates a new expr with added annotations.\"\"\"\n new = expr.copy()\n # Note that new annotations will overwrite existing ones with matching keys.\n new.annotations = {**expr.annotations, **annotations}\n return new\n\n\n### 
S-Ops.\n\n\nclass SOp(RASPExpr):\n \"\"\"A Sequence Operation.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> Sequence[Value]:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of SOps using numeric operators with constant values.\n # Note: if inheriting SOp by a dataclass, make sure to disable eq and order,\n # as they will override these.\n\n def __lt__(self, other: Value) -> \"SOp\":\n \"\"\"self < other.\"\"\"\n return Map(lambda x: x < other, self)\n\n def __le__(self, other: Value) -> \"SOp\":\n \"\"\"self <= other.\"\"\"\n return Map(lambda x: x <= other, self)\n\n def __eq__(self, other: Value) -> \"SOp\":\n \"\"\"self == other.\"\"\"\n return Map(lambda x: x == other, self)\n\n def __ne__(self, other: Value) -> \"SOp\":\n \"\"\"self != other.\"\"\"\n return Map(lambda x: x != other, self)\n\n def __gt__(self, other: Value) -> \"SOp\":\n \"\"\"self > other.\"\"\"\n return Map(lambda x: x > other, self)\n\n def __ge__(self, other: Value) -> \"SOp\":\n \"\"\"self >= other.\"\"\"\n return Map(lambda x: x >= other, self)\n\n def __add__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"self + other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, self, other)\n return Map(lambda x: x + other, self)\n\n def __radd__(self, other: Union[\"SOp\", Value]) -> \"SOp\":\n \"\"\"other + self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x + y, other, self)\n return Map(lambda x: other + x, self)\n\n def __sub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self - other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, self, other)\n return Map(lambda x: x - other, self)\n\n def __rsub__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other - self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x - y, other, self)\n return Map(lambda x: other - x, self)\n\n def __mul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self * other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, self, other)\n return Map(lambda x: x * other, self)\n\n def __rmul__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other * self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x * y, other, self)\n return Map(lambda x: other * x, self)\n\n def __truediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n\n def __rtruediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other / self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, other, self)\n return Map(lambda x: other / x, self)\n\n def __invert__(self) -> \"SOp\":\n return Map(lambda x: not x, self)\n\n def __and__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self & other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, self, other)\n return Map(lambda x: x and other, self)\n\n def __or__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self | other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, self, other)\n return Map(lambda x: x or other, self)\n\n def __rand__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other & self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, 
other, self)\n return Map(lambda x: other and x, self)\n\n def __ror__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other | self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, other, self)\n return Map(lambda x: x or other, self)\n\n\nclass TokensType(SOp):\n \"\"\"Primitive SOp returning the original input tokens.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"tokens\"\n\n def __repr__(self):\n return \"tokens\"\n\n\nclass IndicesType(SOp):\n \"\"\"Primitive SOp returning the position index at each token.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"indices\"\n\n def __repr__(self):\n return \"indices\"\n\n\nclass LengthType(SOp):\n \"\"\"Primitive SOp returning the total length of the input.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"length\"\n\n def __repr__(self):\n return \"length\"\n\n\ntokens = TokensType()\nindices = IndicesType()\nlength = LengthType()\n\n\nclass Map(SOp):\n \"\"\"SOp that evaluates the function elementwise on the input SOp.\n\n Map(lambda x: x + 1, tokens).eval([1, 2, 3]) == [2, 3, 4]\n \"\"\"\n\n def __init__(self, f: Callable[[Value], Value], inner: SOp):\n super().__init__()\n self.f = f\n self.inner = inner\n\n assert isinstance(self.inner, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n if isinstance(self.inner, Map):\n # Combine the functions into just one.\n inner_f = self.inner.f\n self.f = lambda t: f(inner_f(t))\n self.inner = self.inner.inner\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\nclass SequenceMap(SOp):\n \"\"\"SOp that evaluates the function elementwise on the two given SOp's.\n\n SequenceMap(lambda x, y: x - y, length, tokens).eval([1, 2, 3]) == [2, 1, 0]\n \"\"\"\n\n def __init__(self, f: Callable[[Value, Value], Value], fst: SOp, snd: SOp):\n super().__init__()\n\n if fst == snd:\n logging.warning(\"Creating a SequenceMap with both inputs being the same \"\n \"SOp is discouraged. 
You should use a Map instead.\")\n\n self.f = f\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, SOp)\n assert isinstance(self.snd, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass LinearSequenceMap(SequenceMap):\n \"\"\"SOp that evaluates a linear function elementwise on the two given SOp's.\"\"\"\n\n def __init__(self, fst: SOp, snd: SOp, fst_fac: float, snd_fac: float):\n super().__init__(fst=fst, snd=snd, f=lambda x, y: fst_fac * x + snd_fac * y)\n self.fst_fac = fst_fac\n self.snd_fac = snd_fac\n\n\nclass Full(SOp):\n \"\"\"A SOp evaluating to [fill]*len(input_values).\"\"\"\n\n def __init__(self, fill: Value):\n super().__init__()\n self.fill = fill\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\ndef sop_not(sop: SOp) -> SOp:\n return Map(lambda t: not t, sop)\n\n\nclass ConstantSOp(SOp, Generic[VT]):\n \"\"\"A constant S-Op for testing purposes.\"\"\"\n\n def __init__(self, value: Sequence[VT], check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\n### Selectors.\n\n\nclass Predicate(Protocol):\n\n def __call__(self, key: Value, query: Value) -> bool:\n \"\"\"Applies the predicate.\"\"\"\n\n\nclass Comparison(enum.Enum):\n \"\"\"A two-place boolean comparison predicate for use in Select.\"\"\"\n EQ = \"==\"\n LT = \"<\"\n LEQ = \"<=\"\n GT = \">\"\n GEQ = \">=\"\n NEQ = \"!=\"\n TRUE = \"True\"\n FALSE = \"False\"\n\n def __call__(self, key: Value, query: Value) -> bool:\n if key is None:\n raise ValueError(\"key is None!\")\n if query is None:\n raise ValueError(\"query is None!\")\n return _comparison_table[self](key, query)\n\n\n_comparison_table = {\n Comparison.EQ: lambda key, query: key == query,\n Comparison.LT: lambda key, query: key < query,\n Comparison.LEQ: lambda key, query: key <= query,\n Comparison.GT: lambda key, query: key > query,\n Comparison.GEQ: lambda key, query: key >= query,\n Comparison.NEQ: lambda key, query: key != query,\n Comparison.TRUE: lambda key, query: True,\n Comparison.FALSE: lambda key, query: False,\n}\n\n\nclass Selector(RASPExpr):\n \"\"\"RASP Selector. 
Represents something like an attention head's weights.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> SelectorValue:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of Selector combinations using Python logical operators.\n def __and__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self & other.\"\"\"\n return selector_and(self, other)\n\n def __rand__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other & self.\"\"\"\n return selector_and(other, self)\n\n def __or__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self | other.\"\"\"\n return selector_or(self, other)\n\n def __ror__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other | self.\"\"\"\n return selector_or(other, self)\n\n def __invert__(self) -> \"Selector\":\n \"\"\"~self.\"\"\"\n return selector_not(self)\n\n\nclass Select(Selector):\n \"\"\"Primitive that creates a Selector.\"\"\"\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):\n super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.keys, self.queries]\n\n\nclass ConstantSelector(Selector):\n \"\"\"A constant selector for testing purposes.\"\"\"\n\n def __init__(self, value: SelectorValue, check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\nclass SelectorWidth(SOp):\n \"\"\"SelectorWidth primitive.\"\"\"\n\n def __init__(self, selector: Selector):\n super().__init__()\n self.selector = selector\n assert isinstance(self.selector, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector]\n\n\nclass SelectorAnd(Selector):\n \"\"\"Implements elementwise `and` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorOr(Selector):\n \"\"\"Implements elementwise `or` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorNot(Selector):\n \"\"\"Implements elementwise `not` on a selector.\"\"\"\n\n def __init__(self, inner: Selector):\n self.inner = inner\n super().__init__()\n assert isinstance(self.inner, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\ndef selector_not(\n inner: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorNot, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(inner, Select):\n predicate = lambda k, q: not inner.predicate(k, q)\n return Select(inner.keys, inner.queries, predicate=predicate)\n\n return SelectorNot(inner)\n\n\ndef selector_and(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorAnd, or a Select if simplifying is possible.\"\"\"", "metadata": {"task_id": "deepmind--tracr/23", "ground_truth": " if simplify and isinstance(fst, Select) and 
isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l and r)\n if simplified:\n return simplified\n\n return SelectorAnd(fst, snd)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 151, "lineno": 586, "function_name": "selector_and"}, "groundtruth": " if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l and r)\n if simplified:\n return simplified\n\n return SelectorAnd(fst, snd)\n"} +{"prompt": ": Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self / other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, self, other)\n return Map(lambda x: x / other, self)\n\n def __rtruediv__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other / self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x / y, other, self)\n return Map(lambda x: other / x, self)\n\n def __invert__(self) -> \"SOp\":\n return Map(lambda x: not x, self)\n\n def __and__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self & other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, self, other)\n return Map(lambda x: x and other, self)\n\n def __or__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"self | other.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, self, other)\n return Map(lambda x: x or other, self)\n\n def __rand__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other & self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x and y, other, self)\n return Map(lambda x: other and x, self)\n\n def __ror__(self, other: Union[\"SOp\", NumericValue]) -> \"SOp\":\n \"\"\"other | self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, other, self)\n return Map(lambda x: x or other, self)\n\n\nclass TokensType(SOp):\n \"\"\"Primitive SOp returning the original input tokens.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"tokens\"\n\n def __repr__(self):\n return \"tokens\"\n\n\nclass IndicesType(SOp):\n \"\"\"Primitive SOp returning the position index at each token.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"indices\"\n\n def __repr__(self):\n return \"indices\"\n\n\nclass LengthType(SOp):\n \"\"\"Primitive SOp returning the total length of the input.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"length\"\n\n def __repr__(self):\n return \"length\"\n\n\ntokens = TokensType()\nindices = IndicesType()\nlength = LengthType()\n\n\nclass Map(SOp):\n \"\"\"SOp that evaluates the function elementwise on the input SOp.\n\n Map(lambda x: x + 1, tokens).eval([1, 2, 3]) == [2, 3, 4]\n \"\"\"\n\n def __init__(self, f: Callable[[Value], Value], inner: SOp):\n super().__init__()\n self.f = f\n self.inner = inner\n\n assert isinstance(self.inner, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n if isinstance(self.inner, Map):\n # Combine the functions into just one.\n inner_f = self.inner.f\n self.f = lambda t: f(inner_f(t))\n self.inner = self.inner.inner\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\nclass SequenceMap(SOp):\n \"\"\"SOp that evaluates the function elementwise on the two 
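
# A minimal sketch of the selector_and behaviour captured by the
# deepmind--tracr/23 ground truth above. Assumes deepmind/tracr is installed
# so that tracr.rasp.rasp imports (per the record's fpath_tuple); this is an
# editorial illustration, not part of the dataset.
from tracr.rasp import rasp

# Identical key/query SOps (the module-level `tokens` singleton): the two
# predicates are fused by _attempt_simplify into a single Select.
eq = rasp.Select(rasp.tokens, rasp.tokens, rasp.Comparison.EQ)
leq = rasp.Select(rasp.tokens, rasp.tokens, rasp.Comparison.LEQ)
assert isinstance(rasp.selector_and(eq, leq), rasp.Select)

# Different, non-Full keys/queries: no common input can be chosen, so
# _attempt_simplify returns None and a compound SelectorAnd node is built.
lt = rasp.Select(rasp.indices, rasp.indices, rasp.Comparison.LT)
assert isinstance(rasp.selector_and(eq, lt), rasp.SelectorAnd)
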
given SOp's.\n\n SequenceMap(lambda x, y: x - y, length, tokens).eval([1, 2, 3]) == [2, 1, 0]\n \"\"\"\n\n def __init__(self, f: Callable[[Value, Value], Value], fst: SOp, snd: SOp):\n super().__init__()\n\n if fst == snd:\n logging.warning(\"Creating a SequenceMap with both inputs being the same \"\n \"SOp is discouraged. You should use a Map instead.\")\n\n self.f = f\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, SOp)\n assert isinstance(self.snd, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass LinearSequenceMap(SequenceMap):\n \"\"\"SOp that evaluates a linear function elementwise on the two given SOp's.\"\"\"\n\n def __init__(self, fst: SOp, snd: SOp, fst_fac: float, snd_fac: float):\n super().__init__(fst=fst, snd=snd, f=lambda x, y: fst_fac * x + snd_fac * y)\n self.fst_fac = fst_fac\n self.snd_fac = snd_fac\n\n\nclass Full(SOp):\n \"\"\"A SOp evaluating to [fill]*len(input_values).\"\"\"\n\n def __init__(self, fill: Value):\n super().__init__()\n self.fill = fill\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\ndef sop_not(sop: SOp) -> SOp:\n return Map(lambda t: not t, sop)\n\n\nclass ConstantSOp(SOp, Generic[VT]):\n \"\"\"A constant S-Op for testing purposes.\"\"\"\n\n def __init__(self, value: Sequence[VT], check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\n### Selectors.\n\n\nclass Predicate(Protocol):\n\n def __call__(self, key: Value, query: Value) -> bool:\n \"\"\"Applies the predicate.\"\"\"\n\n\nclass Comparison(enum.Enum):\n \"\"\"A two-place boolean comparison predicate for use in Select.\"\"\"\n EQ = \"==\"\n LT = \"<\"\n LEQ = \"<=\"\n GT = \">\"\n GEQ = \">=\"\n NEQ = \"!=\"\n TRUE = \"True\"\n FALSE = \"False\"\n\n def __call__(self, key: Value, query: Value) -> bool:\n if key is None:\n raise ValueError(\"key is None!\")\n if query is None:\n raise ValueError(\"query is None!\")\n return _comparison_table[self](key, query)\n\n\n_comparison_table = {\n Comparison.EQ: lambda key, query: key == query,\n Comparison.LT: lambda key, query: key < query,\n Comparison.LEQ: lambda key, query: key <= query,\n Comparison.GT: lambda key, query: key > query,\n Comparison.GEQ: lambda key, query: key >= query,\n Comparison.NEQ: lambda key, query: key != query,\n Comparison.TRUE: lambda key, query: True,\n Comparison.FALSE: lambda key, query: False,\n}\n\n\nclass Selector(RASPExpr):\n \"\"\"RASP Selector. 
Represents something like an attention head's weights.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> SelectorValue:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of Selector combinations using Python logical operators.\n def __and__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self & other.\"\"\"\n return selector_and(self, other)\n\n def __rand__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other & self.\"\"\"\n return selector_and(other, self)\n\n def __or__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self | other.\"\"\"\n return selector_or(self, other)\n\n def __ror__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other | self.\"\"\"\n return selector_or(other, self)\n\n def __invert__(self) -> \"Selector\":\n \"\"\"~self.\"\"\"\n return selector_not(self)\n\n\nclass Select(Selector):\n \"\"\"Primitive that creates a Selector.\"\"\"\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):\n super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.keys, self.queries]\n\n\nclass ConstantSelector(Selector):\n \"\"\"A constant selector for testing purposes.\"\"\"\n\n def __init__(self, value: SelectorValue, check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\nclass SelectorWidth(SOp):\n \"\"\"SelectorWidth primitive.\"\"\"\n\n def __init__(self, selector: Selector):\n super().__init__()\n self.selector = selector\n assert isinstance(self.selector, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector]\n\n\nclass SelectorAnd(Selector):\n \"\"\"Implements elementwise `and` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorOr(Selector):\n \"\"\"Implements elementwise `or` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorNot(Selector):\n \"\"\"Implements elementwise `not` on a selector.\"\"\"\n\n def __init__(self, inner: Selector):\n self.inner = inner\n super().__init__()\n assert isinstance(self.inner, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\ndef selector_not(\n inner: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorNot, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(inner, Select):\n predicate = lambda k, q: not inner.predicate(k, q)\n return Select(inner.keys, inner.queries, predicate=predicate)\n\n return SelectorNot(inner)\n\n\ndef selector_and(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorAnd, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, 
r: l and r)\n if simplified:\n return simplified\n\n return SelectorAnd(fst, snd)\n\n\ndef selector_or(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorOr, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l or r)\n if simplified:\n return simplified\n\n return SelectorOr(fst, snd)\n\n\ndef _attempt_simplify(\n fst: Select,\n snd: Select,\n combine: Callable[[bool, bool], bool],\n) -> Optional[Select]:\n \"\"\"Simplifies two Selects if possible.\n\n If two Selects in a compound Selector have matching keys and queries, they can\n be simplified into one Select with a compound predicate:\n\n lambda k,q: combine(fst.predicate(k,q), snd.predicate(k,q))\n\n This function returns a Select with this predicate if possible,\n and None otherwise.\n\n A Full SOp in a key or query position is a special case that always matches\n any SOp in the corresponding position in the other selector. In that case,\n we bake in the fill value into the corresponding Select's predicate before\n combining. This allows us to use the other SOp as the input to the simplified\n Select.\n\n Args:\n fst: the first Select.\n snd: the second Select.\n combine: how to combine the outputs of the individual predicates.\n\n Returns:\n A combined Select, if possible.\n \"\"\"\n fst_predicate = fst.predicate\n snd_predicate = snd.predicate\n common_keys = None\n common_queries = None\n\n if isinstance(fst.keys, Full):\n common_keys = snd.keys\n # We pass the predicate in as a default arg to avoid unintended recursion.\n fst_predicate = lambda key, query, p=fst_predicate: p(fst.keys.fill, query)\n if isinstance(snd.keys, Full):\n common_keys = fst.keys\n snd_predicate = lambda key, query, p=snd_predicate: p(snd.keys.fill, query)\n if isinstance(fst.queries, Full):\n common_queries = snd.queries\n fst_predicate = lambda key, query, p=fst_predicate: p(key, fst.queries.fill)\n if isinstance(snd.queries, Full):\n common_queries = fst.queries\n snd_predicate = lambda key, query, p=snd_predicate: p(key, snd.queries.fill)\n if fst.keys is snd.keys:\n common_keys = fst.keys\n if fst.queries is snd.queries:\n common_queries = fst.queries\n\n if not common_keys or not common_queries:\n return None\n\n def predicate(key, query):\n return combine(fst_predicate(key, query), snd_predicate(key, query))\n\n return Select(common_keys, common_queries, predicate=predicate)\n\n\nclass Aggregate(SOp, Generic[VT]):\n \"\"\"Aggregate primitive.\"\"\"\n\n def __init__(self,\n selector: Selector,\n sop: SOp,\n default: Optional[VT] = None):\n \"\"\"Initialises. 
The default is used where nothing is selected.\"\"\"", "metadata": {"task_id": "deepmind--tracr/24", "ground_truth": " super().__init__()\n self.selector = selector\n self.sop = sop\n self.default = default\n assert isinstance(self.selector, Selector)\n assert isinstance(self.sop, SOp)\n assert (self.default is None or isinstance(self.default,\n (str, float, bool, int)))\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 237, "lineno": 677, "function_name": "__init__"}, "groundtruth": " super().__init__()\n self.selector = selector\n self.sop = sop\n self.default = default\n assert isinstance(self.selector, Selector)\n assert isinstance(self.sop, SOp)\n assert (self.default is None or isinstance(self.default,\n (str, float, bool, int)))\n"} +{"prompt": " | self.\"\"\"\n if isinstance(other, SOp):\n return SequenceMap(lambda x, y: x or y, other, self)\n return Map(lambda x: x or other, self)\n\n\nclass TokensType(SOp):\n \"\"\"Primitive SOp returning the original input tokens.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"tokens\"\n\n def __repr__(self):\n return \"tokens\"\n\n\nclass IndicesType(SOp):\n \"\"\"Primitive SOp returning the position index at each token.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"indices\"\n\n def __repr__(self):\n return \"indices\"\n\n\nclass LengthType(SOp):\n \"\"\"Primitive SOp returning the total length of the input.\"\"\"\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n @property\n def label(self) -> str:\n return \"length\"\n\n def __repr__(self):\n return \"length\"\n\n\ntokens = TokensType()\nindices = IndicesType()\nlength = LengthType()\n\n\nclass Map(SOp):\n \"\"\"SOp that evaluates the function elementwise on the input SOp.\n\n Map(lambda x: x + 1, tokens).eval([1, 2, 3]) == [2, 3, 4]\n \"\"\"\n\n def __init__(self, f: Callable[[Value], Value], inner: SOp):\n super().__init__()\n self.f = f\n self.inner = inner\n\n assert isinstance(self.inner, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n if isinstance(self.inner, Map):\n # Combine the functions into just one.\n inner_f = self.inner.f\n self.f = lambda t: f(inner_f(t))\n self.inner = self.inner.inner\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\nclass SequenceMap(SOp):\n \"\"\"SOp that evaluates the function elementwise on the two given SOp's.\n\n SequenceMap(lambda x, y: x - y, length, tokens).eval([1, 2, 3]) == [2, 1, 0]\n \"\"\"\n\n def __init__(self, f: Callable[[Value, Value], Value], fst: SOp, snd: SOp):\n super().__init__()\n\n if fst == snd:\n logging.warning(\"Creating a SequenceMap with both inputs being the same \"\n \"SOp is discouraged. 
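
# A usage sketch for the Aggregate initialiser completed in
# deepmind--tracr/24 above. The commented outputs assume tracr's default
# evaluator (defined later in rasp.py, not shown in this record), which
# averages the selected values and falls back to `default` for rows where
# the selector picks nothing.
from tracr.rasp import rasp

earlier = rasp.Select(rasp.indices, rasp.indices, rasp.Comparison.LT)
prev_mean = rasp.Aggregate(earlier, rasp.tokens, default=0)
# prev_mean([3, 1, 2]) -> [0, 3, 2.0]: position 0 selects nothing (so the
# default 0 is used), position 1 averages [3], position 2 averages [3, 1].
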
You should use a Map instead.\")\n\n self.f = f\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, SOp)\n assert isinstance(self.snd, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass LinearSequenceMap(SequenceMap):\n \"\"\"SOp that evaluates a linear function elementwise on the two given SOp's.\"\"\"\n\n def __init__(self, fst: SOp, snd: SOp, fst_fac: float, snd_fac: float):\n super().__init__(fst=fst, snd=snd, f=lambda x, y: fst_fac * x + snd_fac * y)\n self.fst_fac = fst_fac\n self.snd_fac = snd_fac\n\n\nclass Full(SOp):\n \"\"\"A SOp evaluating to [fill]*len(input_values).\"\"\"\n\n def __init__(self, fill: Value):\n super().__init__()\n self.fill = fill\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\ndef sop_not(sop: SOp) -> SOp:\n return Map(lambda t: not t, sop)\n\n\nclass ConstantSOp(SOp, Generic[VT]):\n \"\"\"A constant S-Op for testing purposes.\"\"\"\n\n def __init__(self, value: Sequence[VT], check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\n### Selectors.\n\n\nclass Predicate(Protocol):\n\n def __call__(self, key: Value, query: Value) -> bool:\n \"\"\"Applies the predicate.\"\"\"\n\n\nclass Comparison(enum.Enum):\n \"\"\"A two-place boolean comparison predicate for use in Select.\"\"\"\n EQ = \"==\"\n LT = \"<\"\n LEQ = \"<=\"\n GT = \">\"\n GEQ = \">=\"\n NEQ = \"!=\"\n TRUE = \"True\"\n FALSE = \"False\"\n\n def __call__(self, key: Value, query: Value) -> bool:\n if key is None:\n raise ValueError(\"key is None!\")\n if query is None:\n raise ValueError(\"query is None!\")\n return _comparison_table[self](key, query)\n\n\n_comparison_table = {\n Comparison.EQ: lambda key, query: key == query,\n Comparison.LT: lambda key, query: key < query,\n Comparison.LEQ: lambda key, query: key <= query,\n Comparison.GT: lambda key, query: key > query,\n Comparison.GEQ: lambda key, query: key >= query,\n Comparison.NEQ: lambda key, query: key != query,\n Comparison.TRUE: lambda key, query: True,\n Comparison.FALSE: lambda key, query: False,\n}\n\n\nclass Selector(RASPExpr):\n \"\"\"RASP Selector. 
Represents something like an attention head's weights.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> SelectorValue:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of Selector combinations using Python logical operators.\n def __and__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self & other.\"\"\"\n return selector_and(self, other)\n\n def __rand__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other & self.\"\"\"\n return selector_and(other, self)\n\n def __or__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self | other.\"\"\"\n return selector_or(self, other)\n\n def __ror__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other | self.\"\"\"\n return selector_or(other, self)\n\n def __invert__(self) -> \"Selector\":\n \"\"\"~self.\"\"\"\n return selector_not(self)\n\n\nclass Select(Selector):\n \"\"\"Primitive that creates a Selector.\"\"\"\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):\n super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.keys, self.queries]\n\n\nclass ConstantSelector(Selector):\n \"\"\"A constant selector for testing purposes.\"\"\"\n\n def __init__(self, value: SelectorValue, check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\nclass SelectorWidth(SOp):\n \"\"\"SelectorWidth primitive.\"\"\"\n\n def __init__(self, selector: Selector):\n super().__init__()\n self.selector = selector\n assert isinstance(self.selector, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector]\n\n\nclass SelectorAnd(Selector):\n \"\"\"Implements elementwise `and` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorOr(Selector):\n \"\"\"Implements elementwise `or` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorNot(Selector):\n \"\"\"Implements elementwise `not` on a selector.\"\"\"\n\n def __init__(self, inner: Selector):\n self.inner = inner\n super().__init__()\n assert isinstance(self.inner, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\ndef selector_not(\n inner: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorNot, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(inner, Select):\n predicate = lambda k, q: not inner.predicate(k, q)\n return Select(inner.keys, inner.queries, predicate=predicate)\n\n return SelectorNot(inner)\n\n\ndef selector_and(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorAnd, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, 
r: l and r)\n if simplified:\n return simplified\n\n return SelectorAnd(fst, snd)\n\n\ndef selector_or(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorOr, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l or r)\n if simplified:\n return simplified\n\n return SelectorOr(fst, snd)\n\n\ndef _attempt_simplify(\n fst: Select,\n snd: Select,\n combine: Callable[[bool, bool], bool],\n) -> Optional[Select]:\n \"\"\"Simplifies two Selects if possible.\n\n If two Selects in a compound Selector have matching keys and queries, they can\n be simplified into one Select with a compound predicate:\n\n lambda k,q: combine(fst.predicate(k,q), snd.predicate(k,q))\n\n This function returns a Select with this predicate if possible,\n and None otherwise.\n\n A Full SOp in a key or query position is a special case that always matches\n any SOp in the corresponding position in the other selector. In that case,\n we bake in the fill value into the corresponding Select's predicate before\n combining. This allows us to use the other SOp as the input to the simplified\n Select.\n\n Args:\n fst: the first Select.\n snd: the second Select.\n combine: how to combine the outputs of the individual predicates.\n\n Returns:\n A combined Select, if possible.\n \"\"\"\n fst_predicate = fst.predicate\n snd_predicate = snd.predicate\n common_keys = None\n common_queries = None\n\n if isinstance(fst.keys, Full):\n common_keys = snd.keys\n # We pass the predicate in as a default arg to avoid unintended recursion.\n fst_predicate = lambda key, query, p=fst_predicate: p(fst.keys.fill, query)\n if isinstance(snd.keys, Full):\n common_keys = fst.keys\n snd_predicate = lambda key, query, p=snd_predicate: p(snd.keys.fill, query)\n if isinstance(fst.queries, Full):\n common_queries = snd.queries\n fst_predicate = lambda key, query, p=fst_predicate: p(key, fst.queries.fill)\n if isinstance(snd.queries, Full):\n common_queries = fst.queries\n snd_predicate = lambda key, query, p=snd_predicate: p(key, snd.queries.fill)\n if fst.keys is snd.keys:\n common_keys = fst.keys\n if fst.queries is snd.queries:\n common_queries = fst.queries\n\n if not common_keys or not common_queries:\n return None\n\n def predicate(key, query):\n return combine(fst_predicate(key, query), snd_predicate(key, query))\n\n return Select(common_keys, common_queries, predicate=predicate)\n\n\nclass Aggregate(SOp, Generic[VT]):\n \"\"\"Aggregate primitive.\"\"\"\n\n def __init__(self,\n selector: Selector,\n sop: SOp,\n default: Optional[VT] = None):\n \"\"\"Initialises. The default is used where nothing is selected.\"\"\"\n super().__init__()\n self.selector = selector\n self.sop = sop\n self.default = default\n assert isinstance(self.selector, Selector)\n assert isinstance(self.sop, SOp)\n assert (self.default is None or isinstance(self.default,\n (str, float, bool, int)))\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector, self.sop]\n\n\n### SOp encodings.\n\n\nclass Encoding(enum.Enum):\n \"\"\"The encoding used by a SOp. 
Only number-valued SOps support numerical.\"\"\"\n CATEGORICAL = \"categorical\"\n NUMERICAL = \"numerical\"\n\n\ndef numerical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.NUMERICAL)\n\n\ndef categorical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.CATEGORICAL)\n\n\ndef get_encoding(sop: SOp) -> Encoding:\n return sop.annotations[\"encoding\"]\n\n\ndef is_numerical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is numerically encoded.\"\"\"\n return get_encoding(sop) == Encoding.NUMERICAL\n\n\ndef is_categorical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is categorically encoded.\"\"\"\n return get_encoding(sop) == Encoding.CATEGORICAL\n\n\ndef default_encoding(expr: RASPExpr) -> Optional[Encoding]:\n \"\"\"Adds an 'encoding' annotation, default is Categorical.\"\"\"", "metadata": {"task_id": "deepmind--tracr/25", "ground_truth": " if not isinstance(expr, SOp):\n raise TypeError(f\"expr {expr} is not a SOp.\")\n\n return Encoding.CATEGORICAL\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 271, "lineno": 724, "function_name": "default_encoding"}, "groundtruth": " if not isinstance(expr, SOp):\n raise TypeError(f\"expr {expr} is not a SOp.\")\n\n return Encoding.CATEGORICAL\n"} +{"prompt": "\nindices = IndicesType()\nlength = LengthType()\n\n\nclass Map(SOp):\n \"\"\"SOp that evaluates the function elementwise on the input SOp.\n\n Map(lambda x: x + 1, tokens).eval([1, 2, 3]) == [2, 3, 4]\n \"\"\"\n\n def __init__(self, f: Callable[[Value], Value], inner: SOp):\n super().__init__()\n self.f = f\n self.inner = inner\n\n assert isinstance(self.inner, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n if isinstance(self.inner, Map):\n # Combine the functions into just one.\n inner_f = self.inner.f\n self.f = lambda t: f(inner_f(t))\n self.inner = self.inner.inner\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\nclass SequenceMap(SOp):\n \"\"\"SOp that evaluates the function elementwise on the two given SOp's.\n\n SequenceMap(lambda x, y: x - y, length, tokens).eval([1, 2, 3]) == [2, 1, 0]\n \"\"\"\n\n def __init__(self, f: Callable[[Value, Value], Value], fst: SOp, snd: SOp):\n super().__init__()\n\n if fst == snd:\n logging.warning(\"Creating a SequenceMap with both inputs being the same \"\n \"SOp is discouraged. 
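
# The deepmind--tracr/25 ground truth above makes CATEGORICAL the lazy
# default encoding for every SOp. A short sketch of the annotation helpers;
# assumes `annotate` (referenced by numerical()/categorical() but defined
# earlier in rasp.py) returns a re-annotated copy of the SOp.
from tracr.rasp import rasp

assert rasp.is_categorical(rasp.tokens)  # default annotation, added lazily

# Per the Encoding docstring, only number-valued SOps should be numerical:
inv_len = rasp.numerical(rasp.Map(lambda x: 1.0 / x, rasp.length))
assert rasp.is_numerical(inv_len)
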
You should use a Map instead.\")\n\n self.f = f\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, SOp)\n assert isinstance(self.snd, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass LinearSequenceMap(SequenceMap):\n \"\"\"SOp that evaluates a linear function elementwise on the two given SOp's.\"\"\"\n\n def __init__(self, fst: SOp, snd: SOp, fst_fac: float, snd_fac: float):\n super().__init__(fst=fst, snd=snd, f=lambda x, y: fst_fac * x + snd_fac * y)\n self.fst_fac = fst_fac\n self.snd_fac = snd_fac\n\n\nclass Full(SOp):\n \"\"\"A SOp evaluating to [fill]*len(input_values).\"\"\"\n\n def __init__(self, fill: Value):\n super().__init__()\n self.fill = fill\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\ndef sop_not(sop: SOp) -> SOp:\n return Map(lambda t: not t, sop)\n\n\nclass ConstantSOp(SOp, Generic[VT]):\n \"\"\"A constant S-Op for testing purposes.\"\"\"\n\n def __init__(self, value: Sequence[VT], check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\n### Selectors.\n\n\nclass Predicate(Protocol):\n\n def __call__(self, key: Value, query: Value) -> bool:\n \"\"\"Applies the predicate.\"\"\"\n\n\nclass Comparison(enum.Enum):\n \"\"\"A two-place boolean comparison predicate for use in Select.\"\"\"\n EQ = \"==\"\n LT = \"<\"\n LEQ = \"<=\"\n GT = \">\"\n GEQ = \">=\"\n NEQ = \"!=\"\n TRUE = \"True\"\n FALSE = \"False\"\n\n def __call__(self, key: Value, query: Value) -> bool:\n if key is None:\n raise ValueError(\"key is None!\")\n if query is None:\n raise ValueError(\"query is None!\")\n return _comparison_table[self](key, query)\n\n\n_comparison_table = {\n Comparison.EQ: lambda key, query: key == query,\n Comparison.LT: lambda key, query: key < query,\n Comparison.LEQ: lambda key, query: key <= query,\n Comparison.GT: lambda key, query: key > query,\n Comparison.GEQ: lambda key, query: key >= query,\n Comparison.NEQ: lambda key, query: key != query,\n Comparison.TRUE: lambda key, query: True,\n Comparison.FALSE: lambda key, query: False,\n}\n\n\nclass Selector(RASPExpr):\n \"\"\"RASP Selector. 
Represents something like an attention head's weights.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> SelectorValue:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of Selector combinations using Python logical operators.\n def __and__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self & other.\"\"\"\n return selector_and(self, other)\n\n def __rand__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other & self.\"\"\"\n return selector_and(other, self)\n\n def __or__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self | other.\"\"\"\n return selector_or(self, other)\n\n def __ror__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other | self.\"\"\"\n return selector_or(other, self)\n\n def __invert__(self) -> \"Selector\":\n \"\"\"~self.\"\"\"\n return selector_not(self)\n\n\nclass Select(Selector):\n \"\"\"Primitive that creates a Selector.\"\"\"\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):\n super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.keys, self.queries]\n\n\nclass ConstantSelector(Selector):\n \"\"\"A constant selector for testing purposes.\"\"\"\n\n def __init__(self, value: SelectorValue, check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\nclass SelectorWidth(SOp):\n \"\"\"SelectorWidth primitive.\"\"\"\n\n def __init__(self, selector: Selector):\n super().__init__()\n self.selector = selector\n assert isinstance(self.selector, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector]\n\n\nclass SelectorAnd(Selector):\n \"\"\"Implements elementwise `and` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorOr(Selector):\n \"\"\"Implements elementwise `or` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorNot(Selector):\n \"\"\"Implements elementwise `not` on a selector.\"\"\"\n\n def __init__(self, inner: Selector):\n self.inner = inner\n super().__init__()\n assert isinstance(self.inner, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\ndef selector_not(\n inner: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorNot, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(inner, Select):\n predicate = lambda k, q: not inner.predicate(k, q)\n return Select(inner.keys, inner.queries, predicate=predicate)\n\n return SelectorNot(inner)\n\n\ndef selector_and(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorAnd, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, 
r: l and r)\n if simplified:\n return simplified\n\n return SelectorAnd(fst, snd)\n\n\ndef selector_or(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorOr, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l or r)\n if simplified:\n return simplified\n\n return SelectorOr(fst, snd)\n\n\ndef _attempt_simplify(\n fst: Select,\n snd: Select,\n combine: Callable[[bool, bool], bool],\n) -> Optional[Select]:\n \"\"\"Simplifies two Selects if possible.\n\n If two Selects in a compound Selector have matching keys and queries, they can\n be simplified into one Select with a compound predicate:\n\n lambda k,q: combine(fst.predicate(k,q), snd.predicate(k,q))\n\n This function returns a Select with this predicate if possible,\n and None otherwise.\n\n A Full SOp in a key or query position is a special case that always matches\n any SOp in the corresponding position in the other selector. In that case,\n we bake in the fill value into the corresponding Select's predicate before\n combining. This allows us to use the other SOp as the input to the simplified\n Select.\n\n Args:\n fst: the first Select.\n snd: the second Select.\n combine: how to combine the outputs of the individual predicates.\n\n Returns:\n A combined Select, if possible.\n \"\"\"\n fst_predicate = fst.predicate\n snd_predicate = snd.predicate\n common_keys = None\n common_queries = None\n\n if isinstance(fst.keys, Full):\n common_keys = snd.keys\n # We pass the predicate in as a default arg to avoid unintended recursion.\n fst_predicate = lambda key, query, p=fst_predicate: p(fst.keys.fill, query)\n if isinstance(snd.keys, Full):\n common_keys = fst.keys\n snd_predicate = lambda key, query, p=snd_predicate: p(snd.keys.fill, query)\n if isinstance(fst.queries, Full):\n common_queries = snd.queries\n fst_predicate = lambda key, query, p=fst_predicate: p(key, fst.queries.fill)\n if isinstance(snd.queries, Full):\n common_queries = fst.queries\n snd_predicate = lambda key, query, p=snd_predicate: p(key, snd.queries.fill)\n if fst.keys is snd.keys:\n common_keys = fst.keys\n if fst.queries is snd.queries:\n common_queries = fst.queries\n\n if not common_keys or not common_queries:\n return None\n\n def predicate(key, query):\n return combine(fst_predicate(key, query), snd_predicate(key, query))\n\n return Select(common_keys, common_queries, predicate=predicate)\n\n\nclass Aggregate(SOp, Generic[VT]):\n \"\"\"Aggregate primitive.\"\"\"\n\n def __init__(self,\n selector: Selector,\n sop: SOp,\n default: Optional[VT] = None):\n \"\"\"Initialises. The default is used where nothing is selected.\"\"\"\n super().__init__()\n self.selector = selector\n self.sop = sop\n self.default = default\n assert isinstance(self.selector, Selector)\n assert isinstance(self.sop, SOp)\n assert (self.default is None or isinstance(self.default,\n (str, float, bool, int)))\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector, self.sop]\n\n\n### SOp encodings.\n\n\nclass Encoding(enum.Enum):\n \"\"\"The encoding used by a SOp. 
Only number-valued SOps support numerical.\"\"\"\n CATEGORICAL = \"categorical\"\n NUMERICAL = \"numerical\"\n\n\ndef numerical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.NUMERICAL)\n\n\ndef categorical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.CATEGORICAL)\n\n\ndef get_encoding(sop: SOp) -> Encoding:\n return sop.annotations[\"encoding\"]\n\n\ndef is_numerical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is numerically encoded.\"\"\"\n return get_encoding(sop) == Encoding.NUMERICAL\n\n\ndef is_categorical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is categorically encoded.\"\"\"\n return get_encoding(sop) == Encoding.CATEGORICAL\n\n\ndef default_encoding(expr: RASPExpr) -> Optional[Encoding]:\n \"\"\"Adds an 'encoding' annotation, default is Categorical.\"\"\"\n if not isinstance(expr, SOp):\n raise TypeError(f\"expr {expr} is not a SOp.\")\n\n return Encoding.CATEGORICAL\n\n\nDEFAULT_ANNOTATORS[_ENCODING_KEY] = default_encoding\n\n### naming.\n\n# Subclasses must appear here before superclasses in order for\n# the most specific entry to be used.\n\n_default_name_by_class = {\n # Primitives\n TokensType: \"tokens\",\n IndicesType: \"indices\",\n LengthType: \"length\",\n # SOps\n LinearSequenceMap: \"linear_sequence_map\",\n SequenceMap: \"sequence_map\",\n Map: \"map\",\n Full: \"full\",\n ConstantSOp: \"constant_sop\",\n SelectorWidth: \"selector_width\",\n Aggregate: \"aggregate\",\n SOp: \"sop\",\n # Selectors\n Select: \"select\",\n SelectorAnd: \"selector_and\",\n SelectorOr: \"selector_or\",\n SelectorNot: \"selector_not\",\n ConstantSelector: \"constant_selector\",\n Selector: \"selector\",\n}\n\n\ndef default_name(expr: RASPExpr) -> Dict[str, str]:", "metadata": {"task_id": "deepmind--tracr/26", "ground_truth": " for cls, name in _default_name_by_class.items():\n if isinstance(expr, cls):\n return name\n\n raise NotImplementedError(f\"{expr} was not given a default name!\")\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 322, "lineno": 762, "function_name": "default_name"}, "groundtruth": " for cls, name in _default_name_by_class.items():\n if isinstance(expr, cls):\n return name\n\n raise NotImplementedError(f\"{expr} was not given a default name!\")\n"} +{"prompt": ", length, tokens).eval([1, 2, 3]) == [2, 1, 0]\n \"\"\"\n\n def __init__(self, f: Callable[[Value, Value], Value], fst: SOp, snd: SOp):\n super().__init__()\n\n if fst == snd:\n logging.warning(\"Creating a SequenceMap with both inputs being the same \"\n \"SOp is discouraged. 
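
# The deepmind--tracr/26 ground truth above resolves names by walking
# _default_name_by_class in insertion order, so subclasses must precede
# superclasses (LinearSequenceMap before SequenceMap, Select before
# Selector); otherwise isinstance() would match the generic entry first.
# Sketch only; note the repo's return annotation Dict[str, str] disagrees
# with the plain str the function actually returns.
from tracr.rasp import rasp

lin = rasp.LinearSequenceMap(rasp.tokens, rasp.indices, 1.0, -1.0)
assert rasp.default_name(lin) == "linear_sequence_map"

plain = rasp.SequenceMap(lambda x, y: x, rasp.tokens, rasp.indices)
assert rasp.default_name(plain) == "sequence_map"
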
You should use a Map instead.\")\n\n self.f = f\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, SOp)\n assert isinstance(self.snd, SOp)\n assert callable(self.f) and not isinstance(self.f, RASPExpr)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass LinearSequenceMap(SequenceMap):\n \"\"\"SOp that evaluates a linear function elementwise on the two given SOp's.\"\"\"\n\n def __init__(self, fst: SOp, snd: SOp, fst_fac: float, snd_fac: float):\n super().__init__(fst=fst, snd=snd, f=lambda x, y: fst_fac * x + snd_fac * y)\n self.fst_fac = fst_fac\n self.snd_fac = snd_fac\n\n\nclass Full(SOp):\n \"\"\"A SOp evaluating to [fill]*len(input_values).\"\"\"\n\n def __init__(self, fill: Value):\n super().__init__()\n self.fill = fill\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\ndef sop_not(sop: SOp) -> SOp:\n return Map(lambda t: not t, sop)\n\n\nclass ConstantSOp(SOp, Generic[VT]):\n \"\"\"A constant S-Op for testing purposes.\"\"\"\n\n def __init__(self, value: Sequence[VT], check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\n### Selectors.\n\n\nclass Predicate(Protocol):\n\n def __call__(self, key: Value, query: Value) -> bool:\n \"\"\"Applies the predicate.\"\"\"\n\n\nclass Comparison(enum.Enum):\n \"\"\"A two-place boolean comparison predicate for use in Select.\"\"\"\n EQ = \"==\"\n LT = \"<\"\n LEQ = \"<=\"\n GT = \">\"\n GEQ = \">=\"\n NEQ = \"!=\"\n TRUE = \"True\"\n FALSE = \"False\"\n\n def __call__(self, key: Value, query: Value) -> bool:\n if key is None:\n raise ValueError(\"key is None!\")\n if query is None:\n raise ValueError(\"query is None!\")\n return _comparison_table[self](key, query)\n\n\n_comparison_table = {\n Comparison.EQ: lambda key, query: key == query,\n Comparison.LT: lambda key, query: key < query,\n Comparison.LEQ: lambda key, query: key <= query,\n Comparison.GT: lambda key, query: key > query,\n Comparison.GEQ: lambda key, query: key >= query,\n Comparison.NEQ: lambda key, query: key != query,\n Comparison.TRUE: lambda key, query: True,\n Comparison.FALSE: lambda key, query: False,\n}\n\n\nclass Selector(RASPExpr):\n \"\"\"RASP Selector. 
Represents something like an attention head's weights.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> SelectorValue:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of Selector combinations using Python logical operators.\n def __and__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self & other.\"\"\"\n return selector_and(self, other)\n\n def __rand__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other & self.\"\"\"\n return selector_and(other, self)\n\n def __or__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self | other.\"\"\"\n return selector_or(self, other)\n\n def __ror__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other | self.\"\"\"\n return selector_or(other, self)\n\n def __invert__(self) -> \"Selector\":\n \"\"\"~self.\"\"\"\n return selector_not(self)\n\n\nclass Select(Selector):\n \"\"\"Primitive that creates a Selector.\"\"\"\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):\n super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.keys, self.queries]\n\n\nclass ConstantSelector(Selector):\n \"\"\"A constant selector for testing purposes.\"\"\"\n\n def __init__(self, value: SelectorValue, check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\nclass SelectorWidth(SOp):\n \"\"\"SelectorWidth primitive.\"\"\"\n\n def __init__(self, selector: Selector):\n super().__init__()\n self.selector = selector\n assert isinstance(self.selector, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector]\n\n\nclass SelectorAnd(Selector):\n \"\"\"Implements elementwise `and` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorOr(Selector):\n \"\"\"Implements elementwise `or` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorNot(Selector):\n \"\"\"Implements elementwise `not` on a selector.\"\"\"\n\n def __init__(self, inner: Selector):\n self.inner = inner\n super().__init__()\n assert isinstance(self.inner, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\ndef selector_not(\n inner: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorNot, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(inner, Select):\n predicate = lambda k, q: not inner.predicate(k, q)\n return Select(inner.keys, inner.queries, predicate=predicate)\n\n return SelectorNot(inner)\n\n\ndef selector_and(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorAnd, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, 
r: l and r)\n if simplified:\n return simplified\n\n return SelectorAnd(fst, snd)\n\n\ndef selector_or(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorOr, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l or r)\n if simplified:\n return simplified\n\n return SelectorOr(fst, snd)\n\n\ndef _attempt_simplify(\n fst: Select,\n snd: Select,\n combine: Callable[[bool, bool], bool],\n) -> Optional[Select]:\n \"\"\"Simplifies two Selects if possible.\n\n If two Selects in a compound Selector have matching keys and queries, they can\n be simplified into one Select with a compound predicate:\n\n lambda k,q: combine(fst.predicate(k,q), snd.predicate(k,q))\n\n This function returns a Select with this predicate if possible,\n and None otherwise.\n\n A Full SOp in a key or query position is a special case that always matches\n any SOp in the corresponding position in the other selector. In that case,\n we bake in the fill value into the corresponding Select's predicate before\n combining. This allows us to use the other SOp as the input to the simplified\n Select.\n\n Args:\n fst: the first Select.\n snd: the second Select.\n combine: how to combine the outputs of the individual predicates.\n\n Returns:\n A combined Select, if possible.\n \"\"\"\n fst_predicate = fst.predicate\n snd_predicate = snd.predicate\n common_keys = None\n common_queries = None\n\n if isinstance(fst.keys, Full):\n common_keys = snd.keys\n # We pass the predicate in as a default arg to avoid unintended recursion.\n fst_predicate = lambda key, query, p=fst_predicate: p(fst.keys.fill, query)\n if isinstance(snd.keys, Full):\n common_keys = fst.keys\n snd_predicate = lambda key, query, p=snd_predicate: p(snd.keys.fill, query)\n if isinstance(fst.queries, Full):\n common_queries = snd.queries\n fst_predicate = lambda key, query, p=fst_predicate: p(key, fst.queries.fill)\n if isinstance(snd.queries, Full):\n common_queries = fst.queries\n snd_predicate = lambda key, query, p=snd_predicate: p(key, snd.queries.fill)\n if fst.keys is snd.keys:\n common_keys = fst.keys\n if fst.queries is snd.queries:\n common_queries = fst.queries\n\n if not common_keys or not common_queries:\n return None\n\n def predicate(key, query):\n return combine(fst_predicate(key, query), snd_predicate(key, query))\n\n return Select(common_keys, common_queries, predicate=predicate)\n\n\nclass Aggregate(SOp, Generic[VT]):\n \"\"\"Aggregate primitive.\"\"\"\n\n def __init__(self,\n selector: Selector,\n sop: SOp,\n default: Optional[VT] = None):\n \"\"\"Initialises. The default is used where nothing is selected.\"\"\"\n super().__init__()\n self.selector = selector\n self.sop = sop\n self.default = default\n assert isinstance(self.selector, Selector)\n assert isinstance(self.sop, SOp)\n assert (self.default is None or isinstance(self.default,\n (str, float, bool, int)))\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector, self.sop]\n\n\n### SOp encodings.\n\n\nclass Encoding(enum.Enum):\n \"\"\"The encoding used by a SOp. 
Only number-valued SOps support numerical.\"\"\"\n CATEGORICAL = \"categorical\"\n NUMERICAL = \"numerical\"\n\n\ndef numerical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.NUMERICAL)\n\n\ndef categorical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.CATEGORICAL)\n\n\ndef get_encoding(sop: SOp) -> Encoding:\n return sop.annotations[\"encoding\"]\n\n\ndef is_numerical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is numerically encoded.\"\"\"\n return get_encoding(sop) == Encoding.NUMERICAL\n\n\ndef is_categorical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is categorically encoded.\"\"\"\n return get_encoding(sop) == Encoding.CATEGORICAL\n\n\ndef default_encoding(expr: RASPExpr) -> Optional[Encoding]:\n \"\"\"Adds an 'encoding' annotation, default is Categorical.\"\"\"\n if not isinstance(expr, SOp):\n raise TypeError(f\"expr {expr} is not a SOp.\")\n\n return Encoding.CATEGORICAL\n\n\nDEFAULT_ANNOTATORS[_ENCODING_KEY] = default_encoding\n\n### naming.\n\n# Subclasses must appear here before superclasses in order for\n# the most specific entry to be used.\n\n_default_name_by_class = {\n # Primitives\n TokensType: \"tokens\",\n IndicesType: \"indices\",\n LengthType: \"length\",\n # SOps\n LinearSequenceMap: \"linear_sequence_map\",\n SequenceMap: \"sequence_map\",\n Map: \"map\",\n Full: \"full\",\n ConstantSOp: \"constant_sop\",\n SelectorWidth: \"selector_width\",\n Aggregate: \"aggregate\",\n SOp: \"sop\",\n # Selectors\n Select: \"select\",\n SelectorAnd: \"selector_and\",\n SelectorOr: \"selector_or\",\n SelectorNot: \"selector_not\",\n ConstantSelector: \"constant_selector\",\n Selector: \"selector\",\n}\n\n\ndef default_name(expr: RASPExpr) -> Dict[str, str]:\n for cls, name in _default_name_by_class.items():\n if isinstance(expr, cls):\n return name\n\n raise NotImplementedError(f\"{expr} was not given a default name!\")\n\n\nDEFAULT_ANNOTATORS[_NAME_KEY] = default_name\n\n### evaluation.\n\n\nclass RASPEvaluator(abc.ABC):\n \"\"\"ABC for RASP evaluators.\"\"\"\n\n @abc.abstractmethod\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n\n\nclass DefaultRASPEvaluator(abc.ABC):\n \"\"\"Default evaluator for RASP.\"\"\"\n\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n return self._eval_fn_by_expr_type[type(expr)](expr, xs)\n\n def __init__(self):", "metadata": {"task_id": "deepmind--tracr/27", "ground_truth": " self._eval_fn_by_expr_type = {\n # Primitives\n TokensType: self.eval_tokens,\n IndicesType: self.eval_indices,\n LengthType: self.eval_length,\n # SOps\n LinearSequenceMap: self.eval_sequence_map,\n SequenceMap: self.eval_sequence_map,\n Map: self.eval_map,\n Full: self.eval_full,\n ConstantSOp: self.eval_constant_sop,\n SelectorWidth: self.eval_selector_width,\n Aggregate: self.eval_aggregate,\n SOp: _raise_not_implemented,\n # Selectors\n Select: self.eval_select,\n SelectorAnd: self.eval_selector_and,\n SelectorOr: self.eval_selector_or,\n SelectorNot: self.eval_selector_not,\n ConstantSelector: self.eval_constant_selector,\n Selector: _raise_not_implemented,\n }\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 355, "lineno": 792, "function_name": "__init__"}, "groundtruth": " self._eval_fn_by_expr_type = {\n # Primitives\n TokensType: self.eval_tokens,\n IndicesType: 
self.eval_indices,\n LengthType: self.eval_length,\n # SOps\n LinearSequenceMap: self.eval_sequence_map,\n SequenceMap: self.eval_sequence_map,\n Map: self.eval_map,\n Full: self.eval_full,\n ConstantSOp: self.eval_constant_sop,\n SelectorWidth: self.eval_selector_width,\n Aggregate: self.eval_aggregate,\n SOp: _raise_not_implemented,\n # Selectors\n Select: self.eval_select,\n SelectorAnd: self.eval_selector_and,\n SelectorOr: self.eval_selector_or,\n SelectorNot: self.eval_selector_not,\n ConstantSelector: self.eval_constant_selector,\n Selector: _raise_not_implemented,\n }\n"} +{"prompt": "]:\n return []\n\n\ndef sop_not(sop: SOp) -> SOp:\n return Map(lambda t: not t, sop)\n\n\nclass ConstantSOp(SOp, Generic[VT]):\n \"\"\"A constant S-Op for testing purposes.\"\"\"\n\n def __init__(self, value: Sequence[VT], check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\n### Selectors.\n\n\nclass Predicate(Protocol):\n\n def __call__(self, key: Value, query: Value) -> bool:\n \"\"\"Applies the predicate.\"\"\"\n\n\nclass Comparison(enum.Enum):\n \"\"\"A two-place boolean comparison predicate for use in Select.\"\"\"\n EQ = \"==\"\n LT = \"<\"\n LEQ = \"<=\"\n GT = \">\"\n GEQ = \">=\"\n NEQ = \"!=\"\n TRUE = \"True\"\n FALSE = \"False\"\n\n def __call__(self, key: Value, query: Value) -> bool:\n if key is None:\n raise ValueError(\"key is None!\")\n if query is None:\n raise ValueError(\"query is None!\")\n return _comparison_table[self](key, query)\n\n\n_comparison_table = {\n Comparison.EQ: lambda key, query: key == query,\n Comparison.LT: lambda key, query: key < query,\n Comparison.LEQ: lambda key, query: key <= query,\n Comparison.GT: lambda key, query: key > query,\n Comparison.GEQ: lambda key, query: key >= query,\n Comparison.NEQ: lambda key, query: key != query,\n Comparison.TRUE: lambda key, query: True,\n Comparison.FALSE: lambda key, query: False,\n}\n\n\nclass Selector(RASPExpr):\n \"\"\"RASP Selector. 
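
# The deepmind--tracr/27 ground truth above builds a dispatch table keyed by
# the exact concrete type (evaluate() looks up type(expr)), which is why
# LinearSequenceMap needs its own entry despite subclassing SequenceMap, and
# why the abstract SOp/Selector entries map to _raise_not_implemented.
# Sketch using the module-level evaluate that Selector.__call__ refers to;
# the expected values come from the Map/SequenceMap docstrings in the prompts.
from tracr.rasp import rasp

plus_one = rasp.Map(lambda x: x + 1, rasp.tokens)
assert rasp.evaluate(plus_one, [1, 2, 3]) == [2, 3, 4]

diff = rasp.SequenceMap(lambda x, y: x - y, rasp.length, rasp.tokens)
assert rasp.evaluate(diff, [1, 2, 3]) == [2, 1, 0]
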
Represents something like an attention head's weights.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> SelectorValue:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of Selector combinations using Python logical operators.\n def __and__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self & other.\"\"\"\n return selector_and(self, other)\n\n def __rand__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other & self.\"\"\"\n return selector_and(other, self)\n\n def __or__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self | other.\"\"\"\n return selector_or(self, other)\n\n def __ror__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other | self.\"\"\"\n return selector_or(other, self)\n\n def __invert__(self) -> \"Selector\":\n \"\"\"~self.\"\"\"\n return selector_not(self)\n\n\nclass Select(Selector):\n \"\"\"Primitive that creates a Selector.\"\"\"\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):\n super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.keys, self.queries]\n\n\nclass ConstantSelector(Selector):\n \"\"\"A constant selector for testing purposes.\"\"\"\n\n def __init__(self, value: SelectorValue, check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\nclass SelectorWidth(SOp):\n \"\"\"SelectorWidth primitive.\"\"\"\n\n def __init__(self, selector: Selector):\n super().__init__()\n self.selector = selector\n assert isinstance(self.selector, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector]\n\n\nclass SelectorAnd(Selector):\n \"\"\"Implements elementwise `and` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorOr(Selector):\n \"\"\"Implements elementwise `or` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorNot(Selector):\n \"\"\"Implements elementwise `not` on a selector.\"\"\"\n\n def __init__(self, inner: Selector):\n self.inner = inner\n super().__init__()\n assert isinstance(self.inner, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\ndef selector_not(\n inner: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorNot, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(inner, Select):\n predicate = lambda k, q: not inner.predicate(k, q)\n return Select(inner.keys, inner.queries, predicate=predicate)\n\n return SelectorNot(inner)\n\n\ndef selector_and(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorAnd, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, 
r: l and r)\n if simplified:\n return simplified\n\n return SelectorAnd(fst, snd)\n\n\ndef selector_or(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorOr, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l or r)\n if simplified:\n return simplified\n\n return SelectorOr(fst, snd)\n\n\ndef _attempt_simplify(\n fst: Select,\n snd: Select,\n combine: Callable[[bool, bool], bool],\n) -> Optional[Select]:\n \"\"\"Simplifies two Selects if possible.\n\n If two Selects in a compound Selector have matching keys and queries, they can\n be simplified into one Select with a compound predicate:\n\n lambda k,q: combine(fst.predicate(k,q), snd.predicate(k,q))\n\n This function returns a Select with this predicate if possible,\n and None otherwise.\n\n A Full SOp in a key or query position is a special case that always matches\n any SOp in the corresponding position in the other selector. In that case,\n we bake in the fill value into the corresponding Select's predicate before\n combining. This allows us to use the other SOp as the input to the simplified\n Select.\n\n Args:\n fst: the first Select.\n snd: the second Select.\n combine: how to combine the outputs of the individual predicates.\n\n Returns:\n A combined Select, if possible.\n \"\"\"\n fst_predicate = fst.predicate\n snd_predicate = snd.predicate\n common_keys = None\n common_queries = None\n\n if isinstance(fst.keys, Full):\n common_keys = snd.keys\n # We pass the predicate in as a default arg to avoid unintended recursion.\n fst_predicate = lambda key, query, p=fst_predicate: p(fst.keys.fill, query)\n if isinstance(snd.keys, Full):\n common_keys = fst.keys\n snd_predicate = lambda key, query, p=snd_predicate: p(snd.keys.fill, query)\n if isinstance(fst.queries, Full):\n common_queries = snd.queries\n fst_predicate = lambda key, query, p=fst_predicate: p(key, fst.queries.fill)\n if isinstance(snd.queries, Full):\n common_queries = fst.queries\n snd_predicate = lambda key, query, p=snd_predicate: p(key, snd.queries.fill)\n if fst.keys is snd.keys:\n common_keys = fst.keys\n if fst.queries is snd.queries:\n common_queries = fst.queries\n\n if not common_keys or not common_queries:\n return None\n\n def predicate(key, query):\n return combine(fst_predicate(key, query), snd_predicate(key, query))\n\n return Select(common_keys, common_queries, predicate=predicate)\n\n\nclass Aggregate(SOp, Generic[VT]):\n \"\"\"Aggregate primitive.\"\"\"\n\n def __init__(self,\n selector: Selector,\n sop: SOp,\n default: Optional[VT] = None):\n \"\"\"Initialises. The default is used where nothing is selected.\"\"\"\n super().__init__()\n self.selector = selector\n self.sop = sop\n self.default = default\n assert isinstance(self.selector, Selector)\n assert isinstance(self.sop, SOp)\n assert (self.default is None or isinstance(self.default,\n (str, float, bool, int)))\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector, self.sop]\n\n\n### SOp encodings.\n\n\nclass Encoding(enum.Enum):\n \"\"\"The encoding used by a SOp. 
Only number-valued SOps support numerical.\"\"\"\n CATEGORICAL = \"categorical\"\n NUMERICAL = \"numerical\"\n\n\ndef numerical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.NUMERICAL)\n\n\ndef categorical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.CATEGORICAL)\n\n\ndef get_encoding(sop: SOp) -> Encoding:\n return sop.annotations[\"encoding\"]\n\n\ndef is_numerical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is numerically encoded.\"\"\"\n return get_encoding(sop) == Encoding.NUMERICAL\n\n\ndef is_categorical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is categorically encoded.\"\"\"\n return get_encoding(sop) == Encoding.CATEGORICAL\n\n\ndef default_encoding(expr: RASPExpr) -> Optional[Encoding]:\n \"\"\"Adds an 'encoding' annotation, default is Categorical.\"\"\"\n if not isinstance(expr, SOp):\n raise TypeError(f\"expr {expr} is not a SOp.\")\n\n return Encoding.CATEGORICAL\n\n\nDEFAULT_ANNOTATORS[_ENCODING_KEY] = default_encoding\n\n### naming.\n\n# Subclasses must appear here before superclasses in order for\n# the most specific entry to be used.\n\n_default_name_by_class = {\n # Primitives\n TokensType: \"tokens\",\n IndicesType: \"indices\",\n LengthType: \"length\",\n # SOps\n LinearSequenceMap: \"linear_sequence_map\",\n SequenceMap: \"sequence_map\",\n Map: \"map\",\n Full: \"full\",\n ConstantSOp: \"constant_sop\",\n SelectorWidth: \"selector_width\",\n Aggregate: \"aggregate\",\n SOp: \"sop\",\n # Selectors\n Select: \"select\",\n SelectorAnd: \"selector_and\",\n SelectorOr: \"selector_or\",\n SelectorNot: \"selector_not\",\n ConstantSelector: \"constant_selector\",\n Selector: \"selector\",\n}\n\n\ndef default_name(expr: RASPExpr) -> Dict[str, str]:\n for cls, name in _default_name_by_class.items():\n if isinstance(expr, cls):\n return name\n\n raise NotImplementedError(f\"{expr} was not given a default name!\")\n\n\nDEFAULT_ANNOTATORS[_NAME_KEY] = default_name\n\n### evaluation.\n\n\nclass RASPEvaluator(abc.ABC):\n \"\"\"ABC for RASP evaluators.\"\"\"\n\n @abc.abstractmethod\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n\n\nclass DefaultRASPEvaluator(abc.ABC):\n \"\"\"Default evaluator for RASP.\"\"\"\n\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n return self._eval_fn_by_expr_type[type(expr)](expr, xs)\n\n def __init__(self):\n self._eval_fn_by_expr_type = {\n # Primitives\n TokensType: self.eval_tokens,\n IndicesType: self.eval_indices,\n LengthType: self.eval_length,\n # SOps\n LinearSequenceMap: self.eval_sequence_map,\n SequenceMap: self.eval_sequence_map,\n Map: self.eval_map,\n Full: self.eval_full,\n ConstantSOp: self.eval_constant_sop,\n SelectorWidth: self.eval_selector_width,\n Aggregate: self.eval_aggregate,\n SOp: _raise_not_implemented,\n # Selectors\n Select: self.eval_select,\n SelectorAnd: self.eval_selector_and,\n SelectorOr: self.eval_selector_or,\n SelectorNot: self.eval_selector_not,\n ConstantSelector: self.eval_constant_selector,\n Selector: _raise_not_implemented,\n }\n\n def eval_tokens(self, sop: TokensType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(xs)\n\n def eval_indices(self, sop: IndicesType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(range(len(xs)))\n\n def eval_length(self, sop: LengthType, xs: Sequence[Value]) -> Sequence[int]:\n del 
sop\n return [len(xs)] * len(xs)\n\n def eval_sequence_map(self, sop: SequenceMap,\n xs: Sequence[Value]) -> Sequence[Value]:", "metadata": {"task_id": "deepmind--tracr/28", "ground_truth": " fst_values = self.evaluate(sop.fst, xs)\n snd_values = self.evaluate(sop.snd, xs)\n return [\n sop.f(x, y) if None not in [x, y] else None\n for x, y in zip(fst_values, snd_values)\n ]\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 394, "lineno": 831, "function_name": "eval_sequence_map"}, "groundtruth": " fst_values = self.evaluate(sop.fst, xs)\n snd_values = self.evaluate(sop.snd, xs)\n return [\n sop.f(x, y) if None not in [x, y] else None\n for x, y in zip(fst_values, snd_values)\n ]\n"} +{"prompt": "\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\n### Selectors.\n\n\nclass Predicate(Protocol):\n\n def __call__(self, key: Value, query: Value) -> bool:\n \"\"\"Applies the predicate.\"\"\"\n\n\nclass Comparison(enum.Enum):\n \"\"\"A two-place boolean comparison predicate for use in Select.\"\"\"\n EQ = \"==\"\n LT = \"<\"\n LEQ = \"<=\"\n GT = \">\"\n GEQ = \">=\"\n NEQ = \"!=\"\n TRUE = \"True\"\n FALSE = \"False\"\n\n def __call__(self, key: Value, query: Value) -> bool:\n if key is None:\n raise ValueError(\"key is None!\")\n if query is None:\n raise ValueError(\"query is None!\")\n return _comparison_table[self](key, query)\n\n\n_comparison_table = {\n Comparison.EQ: lambda key, query: key == query,\n Comparison.LT: lambda key, query: key < query,\n Comparison.LEQ: lambda key, query: key <= query,\n Comparison.GT: lambda key, query: key > query,\n Comparison.GEQ: lambda key, query: key >= query,\n Comparison.NEQ: lambda key, query: key != query,\n Comparison.TRUE: lambda key, query: True,\n Comparison.FALSE: lambda key, query: False,\n}\n\n\nclass Selector(RASPExpr):\n \"\"\"RASP Selector. 
Represents something like an attention head's weights.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> SelectorValue:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of Selector combinations using Python logical operators.\n def __and__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self & other.\"\"\"\n return selector_and(self, other)\n\n def __rand__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other & self.\"\"\"\n return selector_and(other, self)\n\n def __or__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self | other.\"\"\"\n return selector_or(self, other)\n\n def __ror__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other | self.\"\"\"\n return selector_or(other, self)\n\n def __invert__(self) -> \"Selector\":\n \"\"\"~self.\"\"\"\n return selector_not(self)\n\n\nclass Select(Selector):\n \"\"\"Primitive that creates a Selector.\"\"\"\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):\n super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.keys, self.queries]\n\n\nclass ConstantSelector(Selector):\n \"\"\"A constant selector for testing purposes.\"\"\"\n\n def __init__(self, value: SelectorValue, check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\nclass SelectorWidth(SOp):\n \"\"\"SelectorWidth primitive.\"\"\"\n\n def __init__(self, selector: Selector):\n super().__init__()\n self.selector = selector\n assert isinstance(self.selector, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector]\n\n\nclass SelectorAnd(Selector):\n \"\"\"Implements elementwise `and` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorOr(Selector):\n \"\"\"Implements elementwise `or` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorNot(Selector):\n \"\"\"Implements elementwise `not` on a selector.\"\"\"\n\n def __init__(self, inner: Selector):\n self.inner = inner\n super().__init__()\n assert isinstance(self.inner, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\ndef selector_not(\n inner: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorNot, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(inner, Select):\n predicate = lambda k, q: not inner.predicate(k, q)\n return Select(inner.keys, inner.queries, predicate=predicate)\n\n return SelectorNot(inner)\n\n\ndef selector_and(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorAnd, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, 
r: l and r)\n if simplified:\n return simplified\n\n return SelectorAnd(fst, snd)\n\n\ndef selector_or(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorOr, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l or r)\n if simplified:\n return simplified\n\n return SelectorOr(fst, snd)\n\n\ndef _attempt_simplify(\n fst: Select,\n snd: Select,\n combine: Callable[[bool, bool], bool],\n) -> Optional[Select]:\n \"\"\"Simplifies two Selects if possible.\n\n If two Selects in a compound Selector have matching keys and queries, they can\n be simplified into one Select with a compound predicate:\n\n lambda k,q: combine(fst.predicate(k,q), snd.predicate(k,q))\n\n This function returns a Select with this predicate if possible,\n and None otherwise.\n\n A Full SOp in a key or query position is a special case that always matches\n any SOp in the corresponding position in the other selector. In that case,\n we bake in the fill value into the corresponding Select's predicate before\n combining. This allows us to use the other SOp as the input to the simplified\n Select.\n\n Args:\n fst: the first Select.\n snd: the second Select.\n combine: how to combine the outputs of the individual predicates.\n\n Returns:\n A combined Select, if possible.\n \"\"\"\n fst_predicate = fst.predicate\n snd_predicate = snd.predicate\n common_keys = None\n common_queries = None\n\n if isinstance(fst.keys, Full):\n common_keys = snd.keys\n # We pass the predicate in as a default arg to avoid unintended recursion.\n fst_predicate = lambda key, query, p=fst_predicate: p(fst.keys.fill, query)\n if isinstance(snd.keys, Full):\n common_keys = fst.keys\n snd_predicate = lambda key, query, p=snd_predicate: p(snd.keys.fill, query)\n if isinstance(fst.queries, Full):\n common_queries = snd.queries\n fst_predicate = lambda key, query, p=fst_predicate: p(key, fst.queries.fill)\n if isinstance(snd.queries, Full):\n common_queries = fst.queries\n snd_predicate = lambda key, query, p=snd_predicate: p(key, snd.queries.fill)\n if fst.keys is snd.keys:\n common_keys = fst.keys\n if fst.queries is snd.queries:\n common_queries = fst.queries\n\n if not common_keys or not common_queries:\n return None\n\n def predicate(key, query):\n return combine(fst_predicate(key, query), snd_predicate(key, query))\n\n return Select(common_keys, common_queries, predicate=predicate)\n\n\nclass Aggregate(SOp, Generic[VT]):\n \"\"\"Aggregate primitive.\"\"\"\n\n def __init__(self,\n selector: Selector,\n sop: SOp,\n default: Optional[VT] = None):\n \"\"\"Initialises. The default is used where nothing is selected.\"\"\"\n super().__init__()\n self.selector = selector\n self.sop = sop\n self.default = default\n assert isinstance(self.selector, Selector)\n assert isinstance(self.sop, SOp)\n assert (self.default is None or isinstance(self.default,\n (str, float, bool, int)))\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector, self.sop]\n\n\n### SOp encodings.\n\n\nclass Encoding(enum.Enum):\n \"\"\"The encoding used by a SOp. 
Only number-valued SOps support numerical.\"\"\"\n CATEGORICAL = \"categorical\"\n NUMERICAL = \"numerical\"\n\n\ndef numerical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.NUMERICAL)\n\n\ndef categorical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.CATEGORICAL)\n\n\ndef get_encoding(sop: SOp) -> Encoding:\n return sop.annotations[\"encoding\"]\n\n\ndef is_numerical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is numerically encoded.\"\"\"\n return get_encoding(sop) == Encoding.NUMERICAL\n\n\ndef is_categorical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is categorically encoded.\"\"\"\n return get_encoding(sop) == Encoding.CATEGORICAL\n\n\ndef default_encoding(expr: RASPExpr) -> Optional[Encoding]:\n \"\"\"Adds an 'encoding' annotation, default is Categorical.\"\"\"\n if not isinstance(expr, SOp):\n raise TypeError(f\"expr {expr} is not a SOp.\")\n\n return Encoding.CATEGORICAL\n\n\nDEFAULT_ANNOTATORS[_ENCODING_KEY] = default_encoding\n\n### naming.\n\n# Subclasses must appear here before superclasses in order for\n# the most specific entry to be used.\n\n_default_name_by_class = {\n # Primitives\n TokensType: \"tokens\",\n IndicesType: \"indices\",\n LengthType: \"length\",\n # SOps\n LinearSequenceMap: \"linear_sequence_map\",\n SequenceMap: \"sequence_map\",\n Map: \"map\",\n Full: \"full\",\n ConstantSOp: \"constant_sop\",\n SelectorWidth: \"selector_width\",\n Aggregate: \"aggregate\",\n SOp: \"sop\",\n # Selectors\n Select: \"select\",\n SelectorAnd: \"selector_and\",\n SelectorOr: \"selector_or\",\n SelectorNot: \"selector_not\",\n ConstantSelector: \"constant_selector\",\n Selector: \"selector\",\n}\n\n\ndef default_name(expr: RASPExpr) -> Dict[str, str]:\n for cls, name in _default_name_by_class.items():\n if isinstance(expr, cls):\n return name\n\n raise NotImplementedError(f\"{expr} was not given a default name!\")\n\n\nDEFAULT_ANNOTATORS[_NAME_KEY] = default_name\n\n### evaluation.\n\n\nclass RASPEvaluator(abc.ABC):\n \"\"\"ABC for RASP evaluators.\"\"\"\n\n @abc.abstractmethod\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n\n\nclass DefaultRASPEvaluator(abc.ABC):\n \"\"\"Default evaluator for RASP.\"\"\"\n\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n return self._eval_fn_by_expr_type[type(expr)](expr, xs)\n\n def __init__(self):\n self._eval_fn_by_expr_type = {\n # Primitives\n TokensType: self.eval_tokens,\n IndicesType: self.eval_indices,\n LengthType: self.eval_length,\n # SOps\n LinearSequenceMap: self.eval_sequence_map,\n SequenceMap: self.eval_sequence_map,\n Map: self.eval_map,\n Full: self.eval_full,\n ConstantSOp: self.eval_constant_sop,\n SelectorWidth: self.eval_selector_width,\n Aggregate: self.eval_aggregate,\n SOp: _raise_not_implemented,\n # Selectors\n Select: self.eval_select,\n SelectorAnd: self.eval_selector_and,\n SelectorOr: self.eval_selector_or,\n SelectorNot: self.eval_selector_not,\n ConstantSelector: self.eval_constant_selector,\n Selector: _raise_not_implemented,\n }\n\n def eval_tokens(self, sop: TokensType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(xs)\n\n def eval_indices(self, sop: IndicesType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(range(len(xs)))\n\n def eval_length(self, sop: LengthType, xs: Sequence[Value]) -> Sequence[int]:\n del 
sop\n return [len(xs)] * len(xs)\n\n def eval_sequence_map(self, sop: SequenceMap,\n xs: Sequence[Value]) -> Sequence[Value]:\n fst_values = self.evaluate(sop.fst, xs)\n snd_values = self.evaluate(sop.snd, xs)\n return [\n sop.f(x, y) if None not in [x, y] else None\n for x, y in zip(fst_values, snd_values)\n ]\n\n def eval_map(self, sop: Map, xs: Sequence[Value]) -> Sequence[Value]:", "metadata": {"task_id": "deepmind--tracr/29", "ground_truth": " return [\n sop.f(x) if x is not None else None\n for x in self.evaluate(sop.inner, xs)\n ]\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 408, "lineno": 839, "function_name": "eval_map"}, "groundtruth": " return [\n sop.f(x) if x is not None else None\n for x in self.evaluate(sop.inner, xs)\n ]\n"} +{"prompt": "\n GT = \">\"\n GEQ = \">=\"\n NEQ = \"!=\"\n TRUE = \"True\"\n FALSE = \"False\"\n\n def __call__(self, key: Value, query: Value) -> bool:\n if key is None:\n raise ValueError(\"key is None!\")\n if query is None:\n raise ValueError(\"query is None!\")\n return _comparison_table[self](key, query)\n\n\n_comparison_table = {\n Comparison.EQ: lambda key, query: key == query,\n Comparison.LT: lambda key, query: key < query,\n Comparison.LEQ: lambda key, query: key <= query,\n Comparison.GT: lambda key, query: key > query,\n Comparison.GEQ: lambda key, query: key >= query,\n Comparison.NEQ: lambda key, query: key != query,\n Comparison.TRUE: lambda key, query: True,\n Comparison.FALSE: lambda key, query: False,\n}\n\n\nclass Selector(RASPExpr):\n \"\"\"RASP Selector. Represents something like an attention head's weights.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> SelectorValue:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of Selector combinations using Python logical operators.\n def __and__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self & other.\"\"\"\n return selector_and(self, other)\n\n def __rand__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other & self.\"\"\"\n return selector_and(other, self)\n\n def __or__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self | other.\"\"\"\n return selector_or(self, other)\n\n def __ror__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other | self.\"\"\"\n return selector_or(other, self)\n\n def __invert__(self) -> \"Selector\":\n \"\"\"~self.\"\"\"\n return selector_not(self)\n\n\nclass Select(Selector):\n \"\"\"Primitive that creates a Selector.\"\"\"\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):\n super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.keys, self.queries]\n\n\nclass ConstantSelector(Selector):\n \"\"\"A constant selector for testing purposes.\"\"\"\n\n def __init__(self, value: SelectorValue, check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\nclass SelectorWidth(SOp):\n \"\"\"SelectorWidth primitive.\"\"\"\n\n def __init__(self, selector: Selector):\n super().__init__()\n self.selector = selector\n assert isinstance(self.selector, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector]\n\n\nclass SelectorAnd(Selector):\n \"\"\"Implements elementwise `and` between selectors.\"\"\"\n\n def 
__init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorOr(Selector):\n \"\"\"Implements elementwise `or` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorNot(Selector):\n \"\"\"Implements elementwise `not` on a selector.\"\"\"\n\n def __init__(self, inner: Selector):\n self.inner = inner\n super().__init__()\n assert isinstance(self.inner, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\ndef selector_not(\n inner: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorNot, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(inner, Select):\n predicate = lambda k, q: not inner.predicate(k, q)\n return Select(inner.keys, inner.queries, predicate=predicate)\n\n return SelectorNot(inner)\n\n\ndef selector_and(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorAnd, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l and r)\n if simplified:\n return simplified\n\n return SelectorAnd(fst, snd)\n\n\ndef selector_or(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorOr, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l or r)\n if simplified:\n return simplified\n\n return SelectorOr(fst, snd)\n\n\ndef _attempt_simplify(\n fst: Select,\n snd: Select,\n combine: Callable[[bool, bool], bool],\n) -> Optional[Select]:\n \"\"\"Simplifies two Selects if possible.\n\n If two Selects in a compound Selector have matching keys and queries, they can\n be simplified into one Select with a compound predicate:\n\n lambda k,q: combine(fst.predicate(k,q), snd.predicate(k,q))\n\n This function returns a Select with this predicate if possible,\n and None otherwise.\n\n A Full SOp in a key or query position is a special case that always matches\n any SOp in the corresponding position in the other selector. In that case,\n we bake in the fill value into the corresponding Select's predicate before\n combining. 
This allows us to use the other SOp as the input to the simplified\n Select.\n\n Args:\n fst: the first Select.\n snd: the second Select.\n combine: how to combine the outputs of the individual predicates.\n\n Returns:\n A combined Select, if possible.\n \"\"\"\n fst_predicate = fst.predicate\n snd_predicate = snd.predicate\n common_keys = None\n common_queries = None\n\n if isinstance(fst.keys, Full):\n common_keys = snd.keys\n # We pass the predicate in as a default arg to avoid unintended recursion.\n fst_predicate = lambda key, query, p=fst_predicate: p(fst.keys.fill, query)\n if isinstance(snd.keys, Full):\n common_keys = fst.keys\n snd_predicate = lambda key, query, p=snd_predicate: p(snd.keys.fill, query)\n if isinstance(fst.queries, Full):\n common_queries = snd.queries\n fst_predicate = lambda key, query, p=fst_predicate: p(key, fst.queries.fill)\n if isinstance(snd.queries, Full):\n common_queries = fst.queries\n snd_predicate = lambda key, query, p=snd_predicate: p(key, snd.queries.fill)\n if fst.keys is snd.keys:\n common_keys = fst.keys\n if fst.queries is snd.queries:\n common_queries = fst.queries\n\n if not common_keys or not common_queries:\n return None\n\n def predicate(key, query):\n return combine(fst_predicate(key, query), snd_predicate(key, query))\n\n return Select(common_keys, common_queries, predicate=predicate)\n\n\nclass Aggregate(SOp, Generic[VT]):\n \"\"\"Aggregate primitive.\"\"\"\n\n def __init__(self,\n selector: Selector,\n sop: SOp,\n default: Optional[VT] = None):\n \"\"\"Initialises. The default is used where nothing is selected.\"\"\"\n super().__init__()\n self.selector = selector\n self.sop = sop\n self.default = default\n assert isinstance(self.selector, Selector)\n assert isinstance(self.sop, SOp)\n assert (self.default is None or isinstance(self.default,\n (str, float, bool, int)))\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector, self.sop]\n\n\n### SOp encodings.\n\n\nclass Encoding(enum.Enum):\n \"\"\"The encoding used by a SOp. 
Only number-valued SOps support numerical.\"\"\"\n CATEGORICAL = \"categorical\"\n NUMERICAL = \"numerical\"\n\n\ndef numerical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.NUMERICAL)\n\n\ndef categorical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.CATEGORICAL)\n\n\ndef get_encoding(sop: SOp) -> Encoding:\n return sop.annotations[\"encoding\"]\n\n\ndef is_numerical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is numerically encoded.\"\"\"\n return get_encoding(sop) == Encoding.NUMERICAL\n\n\ndef is_categorical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is categorically encoded.\"\"\"\n return get_encoding(sop) == Encoding.CATEGORICAL\n\n\ndef default_encoding(expr: RASPExpr) -> Optional[Encoding]:\n \"\"\"Adds an 'encoding' annotation, default is Categorical.\"\"\"\n if not isinstance(expr, SOp):\n raise TypeError(f\"expr {expr} is not a SOp.\")\n\n return Encoding.CATEGORICAL\n\n\nDEFAULT_ANNOTATORS[_ENCODING_KEY] = default_encoding\n\n### naming.\n\n# Subclasses must appear here before superclasses in order for\n# the most specific entry to be used.\n\n_default_name_by_class = {\n # Primitives\n TokensType: \"tokens\",\n IndicesType: \"indices\",\n LengthType: \"length\",\n # SOps\n LinearSequenceMap: \"linear_sequence_map\",\n SequenceMap: \"sequence_map\",\n Map: \"map\",\n Full: \"full\",\n ConstantSOp: \"constant_sop\",\n SelectorWidth: \"selector_width\",\n Aggregate: \"aggregate\",\n SOp: \"sop\",\n # Selectors\n Select: \"select\",\n SelectorAnd: \"selector_and\",\n SelectorOr: \"selector_or\",\n SelectorNot: \"selector_not\",\n ConstantSelector: \"constant_selector\",\n Selector: \"selector\",\n}\n\n\ndef default_name(expr: RASPExpr) -> Dict[str, str]:\n for cls, name in _default_name_by_class.items():\n if isinstance(expr, cls):\n return name\n\n raise NotImplementedError(f\"{expr} was not given a default name!\")\n\n\nDEFAULT_ANNOTATORS[_NAME_KEY] = default_name\n\n### evaluation.\n\n\nclass RASPEvaluator(abc.ABC):\n \"\"\"ABC for RASP evaluators.\"\"\"\n\n @abc.abstractmethod\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n\n\nclass DefaultRASPEvaluator(abc.ABC):\n \"\"\"Default evaluator for RASP.\"\"\"\n\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n return self._eval_fn_by_expr_type[type(expr)](expr, xs)\n\n def __init__(self):\n self._eval_fn_by_expr_type = {\n # Primitives\n TokensType: self.eval_tokens,\n IndicesType: self.eval_indices,\n LengthType: self.eval_length,\n # SOps\n LinearSequenceMap: self.eval_sequence_map,\n SequenceMap: self.eval_sequence_map,\n Map: self.eval_map,\n Full: self.eval_full,\n ConstantSOp: self.eval_constant_sop,\n SelectorWidth: self.eval_selector_width,\n Aggregate: self.eval_aggregate,\n SOp: _raise_not_implemented,\n # Selectors\n Select: self.eval_select,\n SelectorAnd: self.eval_selector_and,\n SelectorOr: self.eval_selector_or,\n SelectorNot: self.eval_selector_not,\n ConstantSelector: self.eval_constant_selector,\n Selector: _raise_not_implemented,\n }\n\n def eval_tokens(self, sop: TokensType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(xs)\n\n def eval_indices(self, sop: IndicesType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(range(len(xs)))\n\n def eval_length(self, sop: LengthType, xs: Sequence[Value]) -> Sequence[int]:\n del 
sop\n return [len(xs)] * len(xs)\n\n def eval_sequence_map(self, sop: SequenceMap,\n xs: Sequence[Value]) -> Sequence[Value]:\n fst_values = self.evaluate(sop.fst, xs)\n snd_values = self.evaluate(sop.snd, xs)\n return [\n sop.f(x, y) if None not in [x, y] else None\n for x, y in zip(fst_values, snd_values)\n ]\n\n def eval_map(self, sop: Map, xs: Sequence[Value]) -> Sequence[Value]:\n return [\n sop.f(x) if x is not None else None\n for x in self.evaluate(sop.inner, xs)\n ]\n\n def eval_full(self, sop: Full, xs: Sequence[Value]) -> Sequence[Value]:\n return [sop.fill] * len(xs)\n\n def eval_constant_sop(self, sop: ConstantSOp,\n xs: Sequence[Value]) -> Sequence[Value]:", "metadata": {"task_id": "deepmind--tracr/30", "ground_truth": " if sop.check_length and (len(xs) != len(sop.value)):\n raise ValueError(\n f\"Constant len {len(sop.value)} doesn't match input len {len(xs)}.\")\n return sop.value\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 428, "lineno": 849, "function_name": "eval_constant_sop"}, "groundtruth": " if sop.check_length and (len(xs) != len(sop.value)):\n raise ValueError(\n f\"Constant len {len(sop.value)} doesn't match input len {len(xs)}.\")\n return sop.value\n"} +{"prompt": ": key <= query,\n Comparison.GT: lambda key, query: key > query,\n Comparison.GEQ: lambda key, query: key >= query,\n Comparison.NEQ: lambda key, query: key != query,\n Comparison.TRUE: lambda key, query: True,\n Comparison.FALSE: lambda key, query: False,\n}\n\n\nclass Selector(RASPExpr):\n \"\"\"RASP Selector. Represents something like an attention head's weights.\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> SelectorValue:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of Selector combinations using Python logical operators.\n def __and__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self & other.\"\"\"\n return selector_and(self, other)\n\n def __rand__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other & self.\"\"\"\n return selector_and(other, self)\n\n def __or__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self | other.\"\"\"\n return selector_or(self, other)\n\n def __ror__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other | self.\"\"\"\n return selector_or(other, self)\n\n def __invert__(self) -> \"Selector\":\n \"\"\"~self.\"\"\"\n return selector_not(self)\n\n\nclass Select(Selector):\n \"\"\"Primitive that creates a Selector.\"\"\"\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):\n super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.keys, self.queries]\n\n\nclass ConstantSelector(Selector):\n \"\"\"A constant selector for testing purposes.\"\"\"\n\n def __init__(self, value: SelectorValue, check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\nclass SelectorWidth(SOp):\n \"\"\"SelectorWidth primitive.\"\"\"\n\n def __init__(self, selector: Selector):\n super().__init__()\n self.selector = selector\n assert isinstance(self.selector, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector]\n\n\nclass SelectorAnd(Selector):\n \"\"\"Implements elementwise `and` between selectors.\"\"\"\n\n def __init__(self, 
fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorOr(Selector):\n \"\"\"Implements elementwise `or` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorNot(Selector):\n \"\"\"Implements elementwise `not` on a selector.\"\"\"\n\n def __init__(self, inner: Selector):\n self.inner = inner\n super().__init__()\n assert isinstance(self.inner, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\ndef selector_not(\n inner: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorNot, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(inner, Select):\n predicate = lambda k, q: not inner.predicate(k, q)\n return Select(inner.keys, inner.queries, predicate=predicate)\n\n return SelectorNot(inner)\n\n\ndef selector_and(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorAnd, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l and r)\n if simplified:\n return simplified\n\n return SelectorAnd(fst, snd)\n\n\ndef selector_or(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorOr, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l or r)\n if simplified:\n return simplified\n\n return SelectorOr(fst, snd)\n\n\ndef _attempt_simplify(\n fst: Select,\n snd: Select,\n combine: Callable[[bool, bool], bool],\n) -> Optional[Select]:\n \"\"\"Simplifies two Selects if possible.\n\n If two Selects in a compound Selector have matching keys and queries, they can\n be simplified into one Select with a compound predicate:\n\n lambda k,q: combine(fst.predicate(k,q), snd.predicate(k,q))\n\n This function returns a Select with this predicate if possible,\n and None otherwise.\n\n A Full SOp in a key or query position is a special case that always matches\n any SOp in the corresponding position in the other selector. In that case,\n we bake in the fill value into the corresponding Select's predicate before\n combining. 
This allows us to use the other SOp as the input to the simplified\n Select.\n\n Args:\n fst: the first Select.\n snd: the second Select.\n combine: how to combine the outputs of the individual predicates.\n\n Returns:\n A combined Select, if possible.\n \"\"\"\n fst_predicate = fst.predicate\n snd_predicate = snd.predicate\n common_keys = None\n common_queries = None\n\n if isinstance(fst.keys, Full):\n common_keys = snd.keys\n # We pass the predicate in as a default arg to avoid unintended recursion.\n fst_predicate = lambda key, query, p=fst_predicate: p(fst.keys.fill, query)\n if isinstance(snd.keys, Full):\n common_keys = fst.keys\n snd_predicate = lambda key, query, p=snd_predicate: p(snd.keys.fill, query)\n if isinstance(fst.queries, Full):\n common_queries = snd.queries\n fst_predicate = lambda key, query, p=fst_predicate: p(key, fst.queries.fill)\n if isinstance(snd.queries, Full):\n common_queries = fst.queries\n snd_predicate = lambda key, query, p=snd_predicate: p(key, snd.queries.fill)\n if fst.keys is snd.keys:\n common_keys = fst.keys\n if fst.queries is snd.queries:\n common_queries = fst.queries\n\n if not common_keys or not common_queries:\n return None\n\n def predicate(key, query):\n return combine(fst_predicate(key, query), snd_predicate(key, query))\n\n return Select(common_keys, common_queries, predicate=predicate)\n\n\nclass Aggregate(SOp, Generic[VT]):\n \"\"\"Aggregate primitive.\"\"\"\n\n def __init__(self,\n selector: Selector,\n sop: SOp,\n default: Optional[VT] = None):\n \"\"\"Initialises. The default is used where nothing is selected.\"\"\"\n super().__init__()\n self.selector = selector\n self.sop = sop\n self.default = default\n assert isinstance(self.selector, Selector)\n assert isinstance(self.sop, SOp)\n assert (self.default is None or isinstance(self.default,\n (str, float, bool, int)))\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector, self.sop]\n\n\n### SOp encodings.\n\n\nclass Encoding(enum.Enum):\n \"\"\"The encoding used by a SOp. 
Only number-valued SOps support numerical.\"\"\"\n CATEGORICAL = \"categorical\"\n NUMERICAL = \"numerical\"\n\n\ndef numerical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.NUMERICAL)\n\n\ndef categorical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.CATEGORICAL)\n\n\ndef get_encoding(sop: SOp) -> Encoding:\n return sop.annotations[\"encoding\"]\n\n\ndef is_numerical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is numerically encoded.\"\"\"\n return get_encoding(sop) == Encoding.NUMERICAL\n\n\ndef is_categorical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is categorically encoded.\"\"\"\n return get_encoding(sop) == Encoding.CATEGORICAL\n\n\ndef default_encoding(expr: RASPExpr) -> Optional[Encoding]:\n \"\"\"Adds an 'encoding' annotation, default is Categorical.\"\"\"\n if not isinstance(expr, SOp):\n raise TypeError(f\"expr {expr} is not a SOp.\")\n\n return Encoding.CATEGORICAL\n\n\nDEFAULT_ANNOTATORS[_ENCODING_KEY] = default_encoding\n\n### naming.\n\n# Subclasses must appear here before superclasses in order for\n# the most specific entry to be used.\n\n_default_name_by_class = {\n # Primitives\n TokensType: \"tokens\",\n IndicesType: \"indices\",\n LengthType: \"length\",\n # SOps\n LinearSequenceMap: \"linear_sequence_map\",\n SequenceMap: \"sequence_map\",\n Map: \"map\",\n Full: \"full\",\n ConstantSOp: \"constant_sop\",\n SelectorWidth: \"selector_width\",\n Aggregate: \"aggregate\",\n SOp: \"sop\",\n # Selectors\n Select: \"select\",\n SelectorAnd: \"selector_and\",\n SelectorOr: \"selector_or\",\n SelectorNot: \"selector_not\",\n ConstantSelector: \"constant_selector\",\n Selector: \"selector\",\n}\n\n\ndef default_name(expr: RASPExpr) -> Dict[str, str]:\n for cls, name in _default_name_by_class.items():\n if isinstance(expr, cls):\n return name\n\n raise NotImplementedError(f\"{expr} was not given a default name!\")\n\n\nDEFAULT_ANNOTATORS[_NAME_KEY] = default_name\n\n### evaluation.\n\n\nclass RASPEvaluator(abc.ABC):\n \"\"\"ABC for RASP evaluators.\"\"\"\n\n @abc.abstractmethod\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n\n\nclass DefaultRASPEvaluator(abc.ABC):\n \"\"\"Default evaluator for RASP.\"\"\"\n\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n return self._eval_fn_by_expr_type[type(expr)](expr, xs)\n\n def __init__(self):\n self._eval_fn_by_expr_type = {\n # Primitives\n TokensType: self.eval_tokens,\n IndicesType: self.eval_indices,\n LengthType: self.eval_length,\n # SOps\n LinearSequenceMap: self.eval_sequence_map,\n SequenceMap: self.eval_sequence_map,\n Map: self.eval_map,\n Full: self.eval_full,\n ConstantSOp: self.eval_constant_sop,\n SelectorWidth: self.eval_selector_width,\n Aggregate: self.eval_aggregate,\n SOp: _raise_not_implemented,\n # Selectors\n Select: self.eval_select,\n SelectorAnd: self.eval_selector_and,\n SelectorOr: self.eval_selector_or,\n SelectorNot: self.eval_selector_not,\n ConstantSelector: self.eval_constant_selector,\n Selector: _raise_not_implemented,\n }\n\n def eval_tokens(self, sop: TokensType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(xs)\n\n def eval_indices(self, sop: IndicesType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(range(len(xs)))\n\n def eval_length(self, sop: LengthType, xs: Sequence[Value]) -> Sequence[int]:\n del 
sop\n return [len(xs)] * len(xs)\n\n def eval_sequence_map(self, sop: SequenceMap,\n xs: Sequence[Value]) -> Sequence[Value]:\n fst_values = self.evaluate(sop.fst, xs)\n snd_values = self.evaluate(sop.snd, xs)\n return [\n sop.f(x, y) if None not in [x, y] else None\n for x, y in zip(fst_values, snd_values)\n ]\n\n def eval_map(self, sop: Map, xs: Sequence[Value]) -> Sequence[Value]:\n return [\n sop.f(x) if x is not None else None\n for x in self.evaluate(sop.inner, xs)\n ]\n\n def eval_full(self, sop: Full, xs: Sequence[Value]) -> Sequence[Value]:\n return [sop.fill] * len(xs)\n\n def eval_constant_sop(self, sop: ConstantSOp,\n xs: Sequence[Value]) -> Sequence[Value]:\n if sop.check_length and (len(xs) != len(sop.value)):\n raise ValueError(\n f\"Constant len {len(sop.value)} doesn't match input len {len(xs)}.\")\n return sop.value\n\n def eval_selector_width(self, sop: SelectorWidth,\n xs: Sequence[Value]) -> Sequence[Value]:\n selector_values = self.evaluate(sop.selector, xs)\n return [sum(row) for row in selector_values]\n\n def eval_aggregate(self, sop: Aggregate,\n xs: Sequence[Value]) -> Sequence[Value]:", "metadata": {"task_id": "deepmind--tracr/31", "ground_truth": " selector_value = self.evaluate(sop.selector, xs)\n values = self.evaluate(sop.sop, xs)\n default = sop.default\n\n return [\n _mean(_get_selected(row, values), default) for row in selector_value\n ]\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 446, "lineno": 861, "function_name": "eval_aggregate"}, "groundtruth": " selector_value = self.evaluate(sop.selector, xs)\n values = self.evaluate(sop.sop, xs)\n default = sop.default\n\n return [\n _mean(_get_selected(row, values), default) for row in selector_value\n ]\n"} +{"prompt": ".\"\"\"\n\n def __call__(self, xs: Sequence[Value]) -> SelectorValue:\n return evaluate(self, xs) # pytype: disable=bad-return-type\n\n # Allow construction of Selector combinations using Python logical operators.\n def __and__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self & other.\"\"\"\n return selector_and(self, other)\n\n def __rand__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other & self.\"\"\"\n return selector_and(other, self)\n\n def __or__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"self | other.\"\"\"\n return selector_or(self, other)\n\n def __ror__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other | self.\"\"\"\n return selector_or(other, self)\n\n def __invert__(self) -> \"Selector\":\n \"\"\"~self.\"\"\"\n return selector_not(self)\n\n\nclass Select(Selector):\n \"\"\"Primitive that creates a Selector.\"\"\"\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):\n super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.keys, self.queries]\n\n\nclass ConstantSelector(Selector):\n \"\"\"A constant selector for testing purposes.\"\"\"\n\n def __init__(self, value: SelectorValue, check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\nclass SelectorWidth(SOp):\n \"\"\"SelectorWidth primitive.\"\"\"\n\n def __init__(self, selector: Selector):\n super().__init__()\n self.selector = selector\n assert isinstance(self.selector, Selector)\n\n @property\n def children(self) -> 
Sequence[RASPExpr]:\n return [self.selector]\n\n\nclass SelectorAnd(Selector):\n \"\"\"Implements elementwise `and` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorOr(Selector):\n \"\"\"Implements elementwise `or` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorNot(Selector):\n \"\"\"Implements elementwise `not` on a selector.\"\"\"\n\n def __init__(self, inner: Selector):\n self.inner = inner\n super().__init__()\n assert isinstance(self.inner, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\ndef selector_not(\n inner: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorNot, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(inner, Select):\n predicate = lambda k, q: not inner.predicate(k, q)\n return Select(inner.keys, inner.queries, predicate=predicate)\n\n return SelectorNot(inner)\n\n\ndef selector_and(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorAnd, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l and r)\n if simplified:\n return simplified\n\n return SelectorAnd(fst, snd)\n\n\ndef selector_or(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorOr, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l or r)\n if simplified:\n return simplified\n\n return SelectorOr(fst, snd)\n\n\ndef _attempt_simplify(\n fst: Select,\n snd: Select,\n combine: Callable[[bool, bool], bool],\n) -> Optional[Select]:\n \"\"\"Simplifies two Selects if possible.\n\n If two Selects in a compound Selector have matching keys and queries, they can\n be simplified into one Select with a compound predicate:\n\n lambda k,q: combine(fst.predicate(k,q), snd.predicate(k,q))\n\n This function returns a Select with this predicate if possible,\n and None otherwise.\n\n A Full SOp in a key or query position is a special case that always matches\n any SOp in the corresponding position in the other selector. In that case,\n we bake in the fill value into the corresponding Select's predicate before\n combining. 
This allows us to use the other SOp as the input to the simplified\n Select.\n\n Args:\n fst: the first Select.\n snd: the second Select.\n combine: how to combine the outputs of the individual predicates.\n\n Returns:\n A combined Select, if possible.\n \"\"\"\n fst_predicate = fst.predicate\n snd_predicate = snd.predicate\n common_keys = None\n common_queries = None\n\n if isinstance(fst.keys, Full):\n common_keys = snd.keys\n # We pass the predicate in as a default arg to avoid unintended recursion.\n fst_predicate = lambda key, query, p=fst_predicate: p(fst.keys.fill, query)\n if isinstance(snd.keys, Full):\n common_keys = fst.keys\n snd_predicate = lambda key, query, p=snd_predicate: p(snd.keys.fill, query)\n if isinstance(fst.queries, Full):\n common_queries = snd.queries\n fst_predicate = lambda key, query, p=fst_predicate: p(key, fst.queries.fill)\n if isinstance(snd.queries, Full):\n common_queries = fst.queries\n snd_predicate = lambda key, query, p=snd_predicate: p(key, snd.queries.fill)\n if fst.keys is snd.keys:\n common_keys = fst.keys\n if fst.queries is snd.queries:\n common_queries = fst.queries\n\n if not common_keys or not common_queries:\n return None\n\n def predicate(key, query):\n return combine(fst_predicate(key, query), snd_predicate(key, query))\n\n return Select(common_keys, common_queries, predicate=predicate)\n\n\nclass Aggregate(SOp, Generic[VT]):\n \"\"\"Aggregate primitive.\"\"\"\n\n def __init__(self,\n selector: Selector,\n sop: SOp,\n default: Optional[VT] = None):\n \"\"\"Initialises. The default is used where nothing is selected.\"\"\"\n super().__init__()\n self.selector = selector\n self.sop = sop\n self.default = default\n assert isinstance(self.selector, Selector)\n assert isinstance(self.sop, SOp)\n assert (self.default is None or isinstance(self.default,\n (str, float, bool, int)))\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector, self.sop]\n\n\n### SOp encodings.\n\n\nclass Encoding(enum.Enum):\n \"\"\"The encoding used by a SOp. 
Only number-valued SOps support numerical.\"\"\"\n CATEGORICAL = \"categorical\"\n NUMERICAL = \"numerical\"\n\n\ndef numerical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.NUMERICAL)\n\n\ndef categorical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.CATEGORICAL)\n\n\ndef get_encoding(sop: SOp) -> Encoding:\n return sop.annotations[\"encoding\"]\n\n\ndef is_numerical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is numerically encoded.\"\"\"\n return get_encoding(sop) == Encoding.NUMERICAL\n\n\ndef is_categorical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is categorically encoded.\"\"\"\n return get_encoding(sop) == Encoding.CATEGORICAL\n\n\ndef default_encoding(expr: RASPExpr) -> Optional[Encoding]:\n \"\"\"Adds an 'encoding' annotation, default is Categorical.\"\"\"\n if not isinstance(expr, SOp):\n raise TypeError(f\"expr {expr} is not a SOp.\")\n\n return Encoding.CATEGORICAL\n\n\nDEFAULT_ANNOTATORS[_ENCODING_KEY] = default_encoding\n\n### naming.\n\n# Subclasses must appear here before superclasses in order for\n# the most specific entry to be used.\n\n_default_name_by_class = {\n # Primitives\n TokensType: \"tokens\",\n IndicesType: \"indices\",\n LengthType: \"length\",\n # SOps\n LinearSequenceMap: \"linear_sequence_map\",\n SequenceMap: \"sequence_map\",\n Map: \"map\",\n Full: \"full\",\n ConstantSOp: \"constant_sop\",\n SelectorWidth: \"selector_width\",\n Aggregate: \"aggregate\",\n SOp: \"sop\",\n # Selectors\n Select: \"select\",\n SelectorAnd: \"selector_and\",\n SelectorOr: \"selector_or\",\n SelectorNot: \"selector_not\",\n ConstantSelector: \"constant_selector\",\n Selector: \"selector\",\n}\n\n\ndef default_name(expr: RASPExpr) -> Dict[str, str]:\n for cls, name in _default_name_by_class.items():\n if isinstance(expr, cls):\n return name\n\n raise NotImplementedError(f\"{expr} was not given a default name!\")\n\n\nDEFAULT_ANNOTATORS[_NAME_KEY] = default_name\n\n### evaluation.\n\n\nclass RASPEvaluator(abc.ABC):\n \"\"\"ABC for RASP evaluators.\"\"\"\n\n @abc.abstractmethod\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n\n\nclass DefaultRASPEvaluator(abc.ABC):\n \"\"\"Default evaluator for RASP.\"\"\"\n\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n return self._eval_fn_by_expr_type[type(expr)](expr, xs)\n\n def __init__(self):\n self._eval_fn_by_expr_type = {\n # Primitives\n TokensType: self.eval_tokens,\n IndicesType: self.eval_indices,\n LengthType: self.eval_length,\n # SOps\n LinearSequenceMap: self.eval_sequence_map,\n SequenceMap: self.eval_sequence_map,\n Map: self.eval_map,\n Full: self.eval_full,\n ConstantSOp: self.eval_constant_sop,\n SelectorWidth: self.eval_selector_width,\n Aggregate: self.eval_aggregate,\n SOp: _raise_not_implemented,\n # Selectors\n Select: self.eval_select,\n SelectorAnd: self.eval_selector_and,\n SelectorOr: self.eval_selector_or,\n SelectorNot: self.eval_selector_not,\n ConstantSelector: self.eval_constant_selector,\n Selector: _raise_not_implemented,\n }\n\n def eval_tokens(self, sop: TokensType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(xs)\n\n def eval_indices(self, sop: IndicesType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(range(len(xs)))\n\n def eval_length(self, sop: LengthType, xs: Sequence[Value]) -> Sequence[int]:\n del 
sop\n return [len(xs)] * len(xs)\n\n def eval_sequence_map(self, sop: SequenceMap,\n xs: Sequence[Value]) -> Sequence[Value]:\n fst_values = self.evaluate(sop.fst, xs)\n snd_values = self.evaluate(sop.snd, xs)\n return [\n sop.f(x, y) if None not in [x, y] else None\n for x, y in zip(fst_values, snd_values)\n ]\n\n def eval_map(self, sop: Map, xs: Sequence[Value]) -> Sequence[Value]:\n return [\n sop.f(x) if x is not None else None\n for x in self.evaluate(sop.inner, xs)\n ]\n\n def eval_full(self, sop: Full, xs: Sequence[Value]) -> Sequence[Value]:\n return [sop.fill] * len(xs)\n\n def eval_constant_sop(self, sop: ConstantSOp,\n xs: Sequence[Value]) -> Sequence[Value]:\n if sop.check_length and (len(xs) != len(sop.value)):\n raise ValueError(\n f\"Constant len {len(sop.value)} doesn't match input len {len(xs)}.\")\n return sop.value\n\n def eval_selector_width(self, sop: SelectorWidth,\n xs: Sequence[Value]) -> Sequence[Value]:\n selector_values = self.evaluate(sop.selector, xs)\n return [sum(row) for row in selector_values]\n\n def eval_aggregate(self, sop: Aggregate,\n xs: Sequence[Value]) -> Sequence[Value]:\n selector_value = self.evaluate(sop.selector, xs)\n values = self.evaluate(sop.sop, xs)\n default = sop.default\n\n return [\n _mean(_get_selected(row, values), default) for row in selector_value\n ]\n\n def eval_select(self, sel: Select, xs: Sequence[Value]) -> SelectorValue:\n \"\"\"Evaluates a Select on `xs`.\"\"\"", "metadata": {"task_id": "deepmind--tracr/32", "ground_truth": " key_values = self.evaluate(sel.keys, xs)\n query_values = self.evaluate(sel.queries, xs)\n\n key_len = len(key_values)\n query_len = len(query_values)\n out = np.zeros((query_len, key_len), dtype=bool).tolist()\n for row, query in enumerate(query_values):\n for col, key in enumerate(key_values):\n out[row][col] = bool(sel.predicate(key, query))\n return out\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 456, "lineno": 871, "function_name": "eval_select"}, "groundtruth": " key_values = self.evaluate(sel.keys, xs)\n query_values = self.evaluate(sel.queries, xs)\n\n key_len = len(key_values)\n query_len = len(query_values)\n out = np.zeros((query_len, key_len), dtype=bool).tolist()\n for row, query in enumerate(query_values):\n for col, key in enumerate(key_values):\n out[row][col] = bool(sel.predicate(key, query))\n return out\n"} +{"prompt": "self, other)\n\n def __ror__(self, other: \"Selector\") -> \"Selector\":\n \"\"\"other | self.\"\"\"\n return selector_or(other, self)\n\n def __invert__(self) -> \"Selector\":\n \"\"\"~self.\"\"\"\n return selector_not(self)\n\n\nclass Select(Selector):\n \"\"\"Primitive that creates a Selector.\"\"\"\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):\n super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.keys, self.queries]\n\n\nclass ConstantSelector(Selector):\n \"\"\"A constant selector for testing purposes.\"\"\"\n\n def __init__(self, value: SelectorValue, check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\nclass SelectorWidth(SOp):\n \"\"\"SelectorWidth primitive.\"\"\"\n\n def __init__(self, selector: Selector):\n super().__init__()\n self.selector = selector\n assert 
isinstance(self.selector, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector]\n\n\nclass SelectorAnd(Selector):\n \"\"\"Implements elementwise `and` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorOr(Selector):\n \"\"\"Implements elementwise `or` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorNot(Selector):\n \"\"\"Implements elementwise `not` on a selector.\"\"\"\n\n def __init__(self, inner: Selector):\n self.inner = inner\n super().__init__()\n assert isinstance(self.inner, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\ndef selector_not(\n inner: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorNot, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(inner, Select):\n predicate = lambda k, q: not inner.predicate(k, q)\n return Select(inner.keys, inner.queries, predicate=predicate)\n\n return SelectorNot(inner)\n\n\ndef selector_and(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorAnd, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l and r)\n if simplified:\n return simplified\n\n return SelectorAnd(fst, snd)\n\n\ndef selector_or(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorOr, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l or r)\n if simplified:\n return simplified\n\n return SelectorOr(fst, snd)\n\n\ndef _attempt_simplify(\n fst: Select,\n snd: Select,\n combine: Callable[[bool, bool], bool],\n) -> Optional[Select]:\n \"\"\"Simplifies two Selects if possible.\n\n If two Selects in a compound Selector have matching keys and queries, they can\n be simplified into one Select with a compound predicate:\n\n lambda k,q: combine(fst.predicate(k,q), snd.predicate(k,q))\n\n This function returns a Select with this predicate if possible,\n and None otherwise.\n\n A Full SOp in a key or query position is a special case that always matches\n any SOp in the corresponding position in the other selector. In that case,\n we bake in the fill value into the corresponding Select's predicate before\n combining. 
This allows us to use the other SOp as the input to the simplified\n Select.\n\n Args:\n fst: the first Select.\n snd: the second Select.\n combine: how to combine the outputs of the individual predicates.\n\n Returns:\n A combined Select, if possible.\n \"\"\"\n fst_predicate = fst.predicate\n snd_predicate = snd.predicate\n common_keys = None\n common_queries = None\n\n if isinstance(fst.keys, Full):\n common_keys = snd.keys\n # We pass the predicate in as a default arg to avoid unintended recursion.\n fst_predicate = lambda key, query, p=fst_predicate: p(fst.keys.fill, query)\n if isinstance(snd.keys, Full):\n common_keys = fst.keys\n snd_predicate = lambda key, query, p=snd_predicate: p(snd.keys.fill, query)\n if isinstance(fst.queries, Full):\n common_queries = snd.queries\n fst_predicate = lambda key, query, p=fst_predicate: p(key, fst.queries.fill)\n if isinstance(snd.queries, Full):\n common_queries = fst.queries\n snd_predicate = lambda key, query, p=snd_predicate: p(key, snd.queries.fill)\n if fst.keys is snd.keys:\n common_keys = fst.keys\n if fst.queries is snd.queries:\n common_queries = fst.queries\n\n if not common_keys or not common_queries:\n return None\n\n def predicate(key, query):\n return combine(fst_predicate(key, query), snd_predicate(key, query))\n\n return Select(common_keys, common_queries, predicate=predicate)\n\n\nclass Aggregate(SOp, Generic[VT]):\n \"\"\"Aggregate primitive.\"\"\"\n\n def __init__(self,\n selector: Selector,\n sop: SOp,\n default: Optional[VT] = None):\n \"\"\"Initialises. The default is used where nothing is selected.\"\"\"\n super().__init__()\n self.selector = selector\n self.sop = sop\n self.default = default\n assert isinstance(self.selector, Selector)\n assert isinstance(self.sop, SOp)\n assert (self.default is None or isinstance(self.default,\n (str, float, bool, int)))\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector, self.sop]\n\n\n### SOp encodings.\n\n\nclass Encoding(enum.Enum):\n \"\"\"The encoding used by a SOp. 
Only number-valued SOps support numerical.\"\"\"\n CATEGORICAL = \"categorical\"\n NUMERICAL = \"numerical\"\n\n\ndef numerical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.NUMERICAL)\n\n\ndef categorical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.CATEGORICAL)\n\n\ndef get_encoding(sop: SOp) -> Encoding:\n return sop.annotations[\"encoding\"]\n\n\ndef is_numerical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is numerically encoded.\"\"\"\n return get_encoding(sop) == Encoding.NUMERICAL\n\n\ndef is_categorical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is categorically encoded.\"\"\"\n return get_encoding(sop) == Encoding.CATEGORICAL\n\n\ndef default_encoding(expr: RASPExpr) -> Optional[Encoding]:\n \"\"\"Adds an 'encoding' annotation, default is Categorical.\"\"\"\n if not isinstance(expr, SOp):\n raise TypeError(f\"expr {expr} is not a SOp.\")\n\n return Encoding.CATEGORICAL\n\n\nDEFAULT_ANNOTATORS[_ENCODING_KEY] = default_encoding\n\n### naming.\n\n# Subclasses must appear here before superclasses in order for\n# the most specific entry to be used.\n\n_default_name_by_class = {\n # Primitives\n TokensType: \"tokens\",\n IndicesType: \"indices\",\n LengthType: \"length\",\n # SOps\n LinearSequenceMap: \"linear_sequence_map\",\n SequenceMap: \"sequence_map\",\n Map: \"map\",\n Full: \"full\",\n ConstantSOp: \"constant_sop\",\n SelectorWidth: \"selector_width\",\n Aggregate: \"aggregate\",\n SOp: \"sop\",\n # Selectors\n Select: \"select\",\n SelectorAnd: \"selector_and\",\n SelectorOr: \"selector_or\",\n SelectorNot: \"selector_not\",\n ConstantSelector: \"constant_selector\",\n Selector: \"selector\",\n}\n\n\ndef default_name(expr: RASPExpr) -> Dict[str, str]:\n for cls, name in _default_name_by_class.items():\n if isinstance(expr, cls):\n return name\n\n raise NotImplementedError(f\"{expr} was not given a default name!\")\n\n\nDEFAULT_ANNOTATORS[_NAME_KEY] = default_name\n\n### evaluation.\n\n\nclass RASPEvaluator(abc.ABC):\n \"\"\"ABC for RASP evaluators.\"\"\"\n\n @abc.abstractmethod\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n\n\nclass DefaultRASPEvaluator(abc.ABC):\n \"\"\"Default evaluator for RASP.\"\"\"\n\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n return self._eval_fn_by_expr_type[type(expr)](expr, xs)\n\n def __init__(self):\n self._eval_fn_by_expr_type = {\n # Primitives\n TokensType: self.eval_tokens,\n IndicesType: self.eval_indices,\n LengthType: self.eval_length,\n # SOps\n LinearSequenceMap: self.eval_sequence_map,\n SequenceMap: self.eval_sequence_map,\n Map: self.eval_map,\n Full: self.eval_full,\n ConstantSOp: self.eval_constant_sop,\n SelectorWidth: self.eval_selector_width,\n Aggregate: self.eval_aggregate,\n SOp: _raise_not_implemented,\n # Selectors\n Select: self.eval_select,\n SelectorAnd: self.eval_selector_and,\n SelectorOr: self.eval_selector_or,\n SelectorNot: self.eval_selector_not,\n ConstantSelector: self.eval_constant_selector,\n Selector: _raise_not_implemented,\n }\n\n def eval_tokens(self, sop: TokensType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(xs)\n\n def eval_indices(self, sop: IndicesType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(range(len(xs)))\n\n def eval_length(self, sop: LengthType, xs: Sequence[Value]) -> Sequence[int]:\n del 
sop\n return [len(xs)] * len(xs)\n\n def eval_sequence_map(self, sop: SequenceMap,\n xs: Sequence[Value]) -> Sequence[Value]:\n fst_values = self.evaluate(sop.fst, xs)\n snd_values = self.evaluate(sop.snd, xs)\n return [\n sop.f(x, y) if None not in [x, y] else None\n for x, y in zip(fst_values, snd_values)\n ]\n\n def eval_map(self, sop: Map, xs: Sequence[Value]) -> Sequence[Value]:\n return [\n sop.f(x) if x is not None else None\n for x in self.evaluate(sop.inner, xs)\n ]\n\n def eval_full(self, sop: Full, xs: Sequence[Value]) -> Sequence[Value]:\n return [sop.fill] * len(xs)\n\n def eval_constant_sop(self, sop: ConstantSOp,\n xs: Sequence[Value]) -> Sequence[Value]:\n if sop.check_length and (len(xs) != len(sop.value)):\n raise ValueError(\n f\"Constant len {len(sop.value)} doesn't match input len {len(xs)}.\")\n return sop.value\n\n def eval_selector_width(self, sop: SelectorWidth,\n xs: Sequence[Value]) -> Sequence[Value]:\n selector_values = self.evaluate(sop.selector, xs)\n return [sum(row) for row in selector_values]\n\n def eval_aggregate(self, sop: Aggregate,\n xs: Sequence[Value]) -> Sequence[Value]:\n selector_value = self.evaluate(sop.selector, xs)\n values = self.evaluate(sop.sop, xs)\n default = sop.default\n\n return [\n _mean(_get_selected(row, values), default) for row in selector_value\n ]\n\n def eval_select(self, sel: Select, xs: Sequence[Value]) -> SelectorValue:\n \"\"\"Evaluates a Select on `xs`.\"\"\"\n key_values = self.evaluate(sel.keys, xs)\n query_values = self.evaluate(sel.queries, xs)\n\n key_len = len(key_values)\n query_len = len(query_values)\n out = np.zeros((query_len, key_len), dtype=bool).tolist()\n for row, query in enumerate(query_values):\n for col, key in enumerate(key_values):\n out[row][col] = bool(sel.predicate(key, query))\n return out\n\n def eval_constant_selector(self, sel: ConstantSelector,\n xs: Sequence[Value]) -> SelectorValue:", "metadata": {"task_id": "deepmind--tracr/33", "ground_truth": " if sel.check_length and (len(xs) != len(sel.value)):\n raise ValueError(\n f\"Constant len {len(xs)} doesn't match input len {len(sel.value)}.\")\n return sel.value\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 472, "lineno": 884, "function_name": "eval_constant_selector"}, "groundtruth": " if sel.check_length and (len(xs) != len(sel.value)):\n raise ValueError(\n f\"Constant len {len(xs)} doesn't match input len {len(sel.value)}.\")\n return sel.value\n"} +{"prompt": "\n\n def __init__(self, keys: SOp, queries: SOp, predicate: Predicate):\n super().__init__()\n self.keys = keys\n self.queries = queries\n self.predicate = predicate\n assert isinstance(self.keys, SOp)\n assert isinstance(self.queries, SOp)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.keys, self.queries]\n\n\nclass ConstantSelector(Selector):\n \"\"\"A constant selector for testing purposes.\"\"\"\n\n def __init__(self, value: SelectorValue, check_length: bool = True):\n super().__init__()\n self.value = value\n self.check_length = check_length\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return []\n\n\nclass SelectorWidth(SOp):\n \"\"\"SelectorWidth primitive.\"\"\"\n\n def __init__(self, selector: Selector):\n super().__init__()\n self.selector = selector\n assert isinstance(self.selector, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector]\n\n\nclass SelectorAnd(Selector):\n \"\"\"Implements elementwise `and` between selectors.\"\"\"\n\n def 
__init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorOr(Selector):\n \"\"\"Implements elementwise `or` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorNot(Selector):\n \"\"\"Implements elementwise `not` on a selector.\"\"\"\n\n def __init__(self, inner: Selector):\n self.inner = inner\n super().__init__()\n assert isinstance(self.inner, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\ndef selector_not(\n inner: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorNot, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(inner, Select):\n predicate = lambda k, q: not inner.predicate(k, q)\n return Select(inner.keys, inner.queries, predicate=predicate)\n\n return SelectorNot(inner)\n\n\ndef selector_and(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorAnd, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l and r)\n if simplified:\n return simplified\n\n return SelectorAnd(fst, snd)\n\n\ndef selector_or(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorOr, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l or r)\n if simplified:\n return simplified\n\n return SelectorOr(fst, snd)\n\n\ndef _attempt_simplify(\n fst: Select,\n snd: Select,\n combine: Callable[[bool, bool], bool],\n) -> Optional[Select]:\n \"\"\"Simplifies two Selects if possible.\n\n If two Selects in a compound Selector have matching keys and queries, they can\n be simplified into one Select with a compound predicate:\n\n lambda k,q: combine(fst.predicate(k,q), snd.predicate(k,q))\n\n This function returns a Select with this predicate if possible,\n and None otherwise.\n\n A Full SOp in a key or query position is a special case that always matches\n any SOp in the corresponding position in the other selector. In that case,\n we bake in the fill value into the corresponding Select's predicate before\n combining. 
This allows us to use the other SOp as the input to the simplified\n Select.\n\n Args:\n fst: the first Select.\n snd: the second Select.\n combine: how to combine the outputs of the individual predicates.\n\n Returns:\n A combined Select, if possible.\n \"\"\"\n fst_predicate = fst.predicate\n snd_predicate = snd.predicate\n common_keys = None\n common_queries = None\n\n if isinstance(fst.keys, Full):\n common_keys = snd.keys\n # We pass the predicate in as a default arg to avoid unintended recursion.\n fst_predicate = lambda key, query, p=fst_predicate: p(fst.keys.fill, query)\n if isinstance(snd.keys, Full):\n common_keys = fst.keys\n snd_predicate = lambda key, query, p=snd_predicate: p(snd.keys.fill, query)\n if isinstance(fst.queries, Full):\n common_queries = snd.queries\n fst_predicate = lambda key, query, p=fst_predicate: p(key, fst.queries.fill)\n if isinstance(snd.queries, Full):\n common_queries = fst.queries\n snd_predicate = lambda key, query, p=snd_predicate: p(key, snd.queries.fill)\n if fst.keys is snd.keys:\n common_keys = fst.keys\n if fst.queries is snd.queries:\n common_queries = fst.queries\n\n if not common_keys or not common_queries:\n return None\n\n def predicate(key, query):\n return combine(fst_predicate(key, query), snd_predicate(key, query))\n\n return Select(common_keys, common_queries, predicate=predicate)\n\n\nclass Aggregate(SOp, Generic[VT]):\n \"\"\"Aggregate primitive.\"\"\"\n\n def __init__(self,\n selector: Selector,\n sop: SOp,\n default: Optional[VT] = None):\n \"\"\"Initialises. The default is used where nothing is selected.\"\"\"\n super().__init__()\n self.selector = selector\n self.sop = sop\n self.default = default\n assert isinstance(self.selector, Selector)\n assert isinstance(self.sop, SOp)\n assert (self.default is None or isinstance(self.default,\n (str, float, bool, int)))\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector, self.sop]\n\n\n### SOp encodings.\n\n\nclass Encoding(enum.Enum):\n \"\"\"The encoding used by a SOp. 
Only number-valued SOps support numerical.\"\"\"\n CATEGORICAL = \"categorical\"\n NUMERICAL = \"numerical\"\n\n\ndef numerical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.NUMERICAL)\n\n\ndef categorical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.CATEGORICAL)\n\n\ndef get_encoding(sop: SOp) -> Encoding:\n return sop.annotations[\"encoding\"]\n\n\ndef is_numerical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is numerically encoded.\"\"\"\n return get_encoding(sop) == Encoding.NUMERICAL\n\n\ndef is_categorical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is categorically encoded.\"\"\"\n return get_encoding(sop) == Encoding.CATEGORICAL\n\n\ndef default_encoding(expr: RASPExpr) -> Optional[Encoding]:\n \"\"\"Adds an 'encoding' annotation, default is Categorical.\"\"\"\n if not isinstance(expr, SOp):\n raise TypeError(f\"expr {expr} is not a SOp.\")\n\n return Encoding.CATEGORICAL\n\n\nDEFAULT_ANNOTATORS[_ENCODING_KEY] = default_encoding\n\n### naming.\n\n# Subclasses must appear here before superclasses in order for\n# the most specific entry to be used.\n\n_default_name_by_class = {\n # Primitives\n TokensType: \"tokens\",\n IndicesType: \"indices\",\n LengthType: \"length\",\n # SOps\n LinearSequenceMap: \"linear_sequence_map\",\n SequenceMap: \"sequence_map\",\n Map: \"map\",\n Full: \"full\",\n ConstantSOp: \"constant_sop\",\n SelectorWidth: \"selector_width\",\n Aggregate: \"aggregate\",\n SOp: \"sop\",\n # Selectors\n Select: \"select\",\n SelectorAnd: \"selector_and\",\n SelectorOr: \"selector_or\",\n SelectorNot: \"selector_not\",\n ConstantSelector: \"constant_selector\",\n Selector: \"selector\",\n}\n\n\ndef default_name(expr: RASPExpr) -> Dict[str, str]:\n for cls, name in _default_name_by_class.items():\n if isinstance(expr, cls):\n return name\n\n raise NotImplementedError(f\"{expr} was not given a default name!\")\n\n\nDEFAULT_ANNOTATORS[_NAME_KEY] = default_name\n\n### evaluation.\n\n\nclass RASPEvaluator(abc.ABC):\n \"\"\"ABC for RASP evaluators.\"\"\"\n\n @abc.abstractmethod\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n\n\nclass DefaultRASPEvaluator(abc.ABC):\n \"\"\"Default evaluator for RASP.\"\"\"\n\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n return self._eval_fn_by_expr_type[type(expr)](expr, xs)\n\n def __init__(self):\n self._eval_fn_by_expr_type = {\n # Primitives\n TokensType: self.eval_tokens,\n IndicesType: self.eval_indices,\n LengthType: self.eval_length,\n # SOps\n LinearSequenceMap: self.eval_sequence_map,\n SequenceMap: self.eval_sequence_map,\n Map: self.eval_map,\n Full: self.eval_full,\n ConstantSOp: self.eval_constant_sop,\n SelectorWidth: self.eval_selector_width,\n Aggregate: self.eval_aggregate,\n SOp: _raise_not_implemented,\n # Selectors\n Select: self.eval_select,\n SelectorAnd: self.eval_selector_and,\n SelectorOr: self.eval_selector_or,\n SelectorNot: self.eval_selector_not,\n ConstantSelector: self.eval_constant_selector,\n Selector: _raise_not_implemented,\n }\n\n def eval_tokens(self, sop: TokensType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(xs)\n\n def eval_indices(self, sop: IndicesType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(range(len(xs)))\n\n def eval_length(self, sop: LengthType, xs: Sequence[Value]) -> Sequence[int]:\n del 
sop\n return [len(xs)] * len(xs)\n\n def eval_sequence_map(self, sop: SequenceMap,\n xs: Sequence[Value]) -> Sequence[Value]:\n fst_values = self.evaluate(sop.fst, xs)\n snd_values = self.evaluate(sop.snd, xs)\n return [\n sop.f(x, y) if None not in [x, y] else None\n for x, y in zip(fst_values, snd_values)\n ]\n\n def eval_map(self, sop: Map, xs: Sequence[Value]) -> Sequence[Value]:\n return [\n sop.f(x) if x is not None else None\n for x in self.evaluate(sop.inner, xs)\n ]\n\n def eval_full(self, sop: Full, xs: Sequence[Value]) -> Sequence[Value]:\n return [sop.fill] * len(xs)\n\n def eval_constant_sop(self, sop: ConstantSOp,\n xs: Sequence[Value]) -> Sequence[Value]:\n if sop.check_length and (len(xs) != len(sop.value)):\n raise ValueError(\n f\"Constant len {len(sop.value)} doesn't match input len {len(xs)}.\")\n return sop.value\n\n def eval_selector_width(self, sop: SelectorWidth,\n xs: Sequence[Value]) -> Sequence[Value]:\n selector_values = self.evaluate(sop.selector, xs)\n return [sum(row) for row in selector_values]\n\n def eval_aggregate(self, sop: Aggregate,\n xs: Sequence[Value]) -> Sequence[Value]:\n selector_value = self.evaluate(sop.selector, xs)\n values = self.evaluate(sop.sop, xs)\n default = sop.default\n\n return [\n _mean(_get_selected(row, values), default) for row in selector_value\n ]\n\n def eval_select(self, sel: Select, xs: Sequence[Value]) -> SelectorValue:\n \"\"\"Evaluates a Select on `xs`.\"\"\"\n key_values = self.evaluate(sel.keys, xs)\n query_values = self.evaluate(sel.queries, xs)\n\n key_len = len(key_values)\n query_len = len(query_values)\n out = np.zeros((query_len, key_len), dtype=bool).tolist()\n for row, query in enumerate(query_values):\n for col, key in enumerate(key_values):\n out[row][col] = bool(sel.predicate(key, query))\n return out\n\n def eval_constant_selector(self, sel: ConstantSelector,\n xs: Sequence[Value]) -> SelectorValue:\n if sel.check_length and (len(xs) != len(sel.value)):\n raise ValueError(\n f\"Constant len {len(xs)} doesn't match input len {len(sel.value)}.\")\n return sel.value\n\n def eval_selector_and(self, sel: SelectorAnd,\n xs: Sequence[Value]) -> SelectorValue:", "metadata": {"task_id": "deepmind--tracr/34", "ground_truth": " fst_values = self.evaluate(sel.fst, xs)\n snd_values = self.evaluate(sel.snd, xs)\n return np.logical_and(np.array(fst_values), np.array(snd_values)).tolist()\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 484, "lineno": 891, "function_name": "eval_selector_and"}, "groundtruth": " fst_values = self.evaluate(sel.fst, xs)\n snd_values = self.evaluate(sel.snd, xs)\n return np.logical_and(np.array(fst_values), np.array(snd_values)).tolist()\n"} +{"prompt": "__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorOr(Selector):\n \"\"\"Implements elementwise `or` between selectors.\"\"\"\n\n def __init__(self, fst: Selector, snd: Selector):\n super().__init__()\n self.fst = fst\n self.snd = snd\n assert isinstance(self.fst, Selector)\n assert isinstance(self.snd, Selector)\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.fst, self.snd]\n\n\nclass SelectorNot(Selector):\n \"\"\"Implements elementwise `not` on a selector.\"\"\"\n\n def __init__(self, inner: Selector):\n self.inner = inner\n super().__init__()\n assert isinstance(self.inner, Selector)\n\n @property\n def 
children(self) -> Sequence[RASPExpr]:\n return [self.inner]\n\n\ndef selector_not(\n inner: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorNot, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(inner, Select):\n predicate = lambda k, q: not inner.predicate(k, q)\n return Select(inner.keys, inner.queries, predicate=predicate)\n\n return SelectorNot(inner)\n\n\ndef selector_and(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorAnd, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l and r)\n if simplified:\n return simplified\n\n return SelectorAnd(fst, snd)\n\n\ndef selector_or(\n fst: Selector,\n snd: Selector,\n simplify: bool = True,\n) -> Selector:\n \"\"\"Returns a SelectorOr, or a Select if simplifying is possible.\"\"\"\n if simplify and isinstance(fst, Select) and isinstance(snd, Select):\n simplified = _attempt_simplify(fst, snd, lambda l, r: l or r)\n if simplified:\n return simplified\n\n return SelectorOr(fst, snd)\n\n\ndef _attempt_simplify(\n fst: Select,\n snd: Select,\n combine: Callable[[bool, bool], bool],\n) -> Optional[Select]:\n \"\"\"Simplifies two Selects if possible.\n\n If two Selects in a compound Selector have matching keys and queries, they can\n be simplified into one Select with a compound predicate:\n\n lambda k,q: combine(fst.predicate(k,q), snd.predicate(k,q))\n\n This function returns a Select with this predicate if possible,\n and None otherwise.\n\n A Full SOp in a key or query position is a special case that always matches\n any SOp in the corresponding position in the other selector. In that case,\n we bake in the fill value into the corresponding Select's predicate before\n combining. This allows us to use the other SOp as the input to the simplified\n Select.\n\n Args:\n fst: the first Select.\n snd: the second Select.\n combine: how to combine the outputs of the individual predicates.\n\n Returns:\n A combined Select, if possible.\n \"\"\"\n fst_predicate = fst.predicate\n snd_predicate = snd.predicate\n common_keys = None\n common_queries = None\n\n if isinstance(fst.keys, Full):\n common_keys = snd.keys\n # We pass the predicate in as a default arg to avoid unintended recursion.\n fst_predicate = lambda key, query, p=fst_predicate: p(fst.keys.fill, query)\n if isinstance(snd.keys, Full):\n common_keys = fst.keys\n snd_predicate = lambda key, query, p=snd_predicate: p(snd.keys.fill, query)\n if isinstance(fst.queries, Full):\n common_queries = snd.queries\n fst_predicate = lambda key, query, p=fst_predicate: p(key, fst.queries.fill)\n if isinstance(snd.queries, Full):\n common_queries = fst.queries\n snd_predicate = lambda key, query, p=snd_predicate: p(key, snd.queries.fill)\n if fst.keys is snd.keys:\n common_keys = fst.keys\n if fst.queries is snd.queries:\n common_queries = fst.queries\n\n if not common_keys or not common_queries:\n return None\n\n def predicate(key, query):\n return combine(fst_predicate(key, query), snd_predicate(key, query))\n\n return Select(common_keys, common_queries, predicate=predicate)\n\n\nclass Aggregate(SOp, Generic[VT]):\n \"\"\"Aggregate primitive.\"\"\"\n\n def __init__(self,\n selector: Selector,\n sop: SOp,\n default: Optional[VT] = None):\n \"\"\"Initialises. 
The default is used where nothing is selected.\"\"\"\n super().__init__()\n self.selector = selector\n self.sop = sop\n self.default = default\n assert isinstance(self.selector, Selector)\n assert isinstance(self.sop, SOp)\n assert (self.default is None or isinstance(self.default,\n (str, float, bool, int)))\n\n @property\n def children(self) -> Sequence[RASPExpr]:\n return [self.selector, self.sop]\n\n\n### SOp encodings.\n\n\nclass Encoding(enum.Enum):\n \"\"\"The encoding used by a SOp. Only number-valued SOps support numerical.\"\"\"\n CATEGORICAL = \"categorical\"\n NUMERICAL = \"numerical\"\n\n\ndef numerical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.NUMERICAL)\n\n\ndef categorical(sop: SOpT) -> SOpT:\n return annotate(sop, encoding=Encoding.CATEGORICAL)\n\n\ndef get_encoding(sop: SOp) -> Encoding:\n return sop.annotations[\"encoding\"]\n\n\ndef is_numerical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is numerically encoded.\"\"\"\n return get_encoding(sop) == Encoding.NUMERICAL\n\n\ndef is_categorical(sop: SOp) -> bool:\n \"\"\"Check if the SOp is categorically encoded.\"\"\"\n return get_encoding(sop) == Encoding.CATEGORICAL\n\n\ndef default_encoding(expr: RASPExpr) -> Optional[Encoding]:\n \"\"\"Adds an 'encoding' annotation, default is Categorical.\"\"\"\n if not isinstance(expr, SOp):\n raise TypeError(f\"expr {expr} is not a SOp.\")\n\n return Encoding.CATEGORICAL\n\n\nDEFAULT_ANNOTATORS[_ENCODING_KEY] = default_encoding\n\n### naming.\n\n# Subclasses must appear here before superclasses in order for\n# the most specific entry to be used.\n\n_default_name_by_class = {\n # Primitives\n TokensType: \"tokens\",\n IndicesType: \"indices\",\n LengthType: \"length\",\n # SOps\n LinearSequenceMap: \"linear_sequence_map\",\n SequenceMap: \"sequence_map\",\n Map: \"map\",\n Full: \"full\",\n ConstantSOp: \"constant_sop\",\n SelectorWidth: \"selector_width\",\n Aggregate: \"aggregate\",\n SOp: \"sop\",\n # Selectors\n Select: \"select\",\n SelectorAnd: \"selector_and\",\n SelectorOr: \"selector_or\",\n SelectorNot: \"selector_not\",\n ConstantSelector: \"constant_selector\",\n Selector: \"selector\",\n}\n\n\ndef default_name(expr: RASPExpr) -> Dict[str, str]:\n for cls, name in _default_name_by_class.items():\n if isinstance(expr, cls):\n return name\n\n raise NotImplementedError(f\"{expr} was not given a default name!\")\n\n\nDEFAULT_ANNOTATORS[_NAME_KEY] = default_name\n\n### evaluation.\n\n\nclass RASPEvaluator(abc.ABC):\n \"\"\"ABC for RASP evaluators.\"\"\"\n\n @abc.abstractmethod\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n\n\nclass DefaultRASPEvaluator(abc.ABC):\n \"\"\"Default evaluator for RASP.\"\"\"\n\n def evaluate(self, expr: RASPExpr,\n xs: Sequence[Value]) -> Union[Sequence[Value], SelectorValue]:\n \"\"\"Evaluates the RASP expression on input `xs`.\"\"\"\n return self._eval_fn_by_expr_type[type(expr)](expr, xs)\n\n def __init__(self):\n self._eval_fn_by_expr_type = {\n # Primitives\n TokensType: self.eval_tokens,\n IndicesType: self.eval_indices,\n LengthType: self.eval_length,\n # SOps\n LinearSequenceMap: self.eval_sequence_map,\n SequenceMap: self.eval_sequence_map,\n Map: self.eval_map,\n Full: self.eval_full,\n ConstantSOp: self.eval_constant_sop,\n SelectorWidth: self.eval_selector_width,\n Aggregate: self.eval_aggregate,\n SOp: _raise_not_implemented,\n # Selectors\n Select: self.eval_select,\n SelectorAnd: 
self.eval_selector_and,\n SelectorOr: self.eval_selector_or,\n SelectorNot: self.eval_selector_not,\n ConstantSelector: self.eval_constant_selector,\n Selector: _raise_not_implemented,\n }\n\n def eval_tokens(self, sop: TokensType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(xs)\n\n def eval_indices(self, sop: IndicesType,\n xs: Sequence[Value]) -> Sequence[Value]:\n del sop\n return list(range(len(xs)))\n\n def eval_length(self, sop: LengthType, xs: Sequence[Value]) -> Sequence[int]:\n del sop\n return [len(xs)] * len(xs)\n\n def eval_sequence_map(self, sop: SequenceMap,\n xs: Sequence[Value]) -> Sequence[Value]:\n fst_values = self.evaluate(sop.fst, xs)\n snd_values = self.evaluate(sop.snd, xs)\n return [\n sop.f(x, y) if None not in [x, y] else None\n for x, y in zip(fst_values, snd_values)\n ]\n\n def eval_map(self, sop: Map, xs: Sequence[Value]) -> Sequence[Value]:\n return [\n sop.f(x) if x is not None else None\n for x in self.evaluate(sop.inner, xs)\n ]\n\n def eval_full(self, sop: Full, xs: Sequence[Value]) -> Sequence[Value]:\n return [sop.fill] * len(xs)\n\n def eval_constant_sop(self, sop: ConstantSOp,\n xs: Sequence[Value]) -> Sequence[Value]:\n if sop.check_length and (len(xs) != len(sop.value)):\n raise ValueError(\n f\"Constant len {len(sop.value)} doesn't match input len {len(xs)}.\")\n return sop.value\n\n def eval_selector_width(self, sop: SelectorWidth,\n xs: Sequence[Value]) -> Sequence[Value]:\n selector_values = self.evaluate(sop.selector, xs)\n return [sum(row) for row in selector_values]\n\n def eval_aggregate(self, sop: Aggregate,\n xs: Sequence[Value]) -> Sequence[Value]:\n selector_value = self.evaluate(sop.selector, xs)\n values = self.evaluate(sop.sop, xs)\n default = sop.default\n\n return [\n _mean(_get_selected(row, values), default) for row in selector_value\n ]\n\n def eval_select(self, sel: Select, xs: Sequence[Value]) -> SelectorValue:\n \"\"\"Evaluates a Select on `xs`.\"\"\"\n key_values = self.evaluate(sel.keys, xs)\n query_values = self.evaluate(sel.queries, xs)\n\n key_len = len(key_values)\n query_len = len(query_values)\n out = np.zeros((query_len, key_len), dtype=bool).tolist()\n for row, query in enumerate(query_values):\n for col, key in enumerate(key_values):\n out[row][col] = bool(sel.predicate(key, query))\n return out\n\n def eval_constant_selector(self, sel: ConstantSelector,\n xs: Sequence[Value]) -> SelectorValue:\n if sel.check_length and (len(xs) != len(sel.value)):\n raise ValueError(\n f\"Constant len {len(xs)} doesn't match input len {len(sel.value)}.\")\n return sel.value\n\n def eval_selector_and(self, sel: SelectorAnd,\n xs: Sequence[Value]) -> SelectorValue:\n fst_values = self.evaluate(sel.fst, xs)\n snd_values = self.evaluate(sel.snd, xs)\n return np.logical_and(np.array(fst_values), np.array(snd_values)).tolist()\n\n def eval_selector_or(self, sel: SelectorOr,\n xs: Sequence[Value]) -> SelectorValue:\n fst_values = self.evaluate(sel.fst, xs)\n snd_values = self.evaluate(sel.snd, xs)\n return np.logical_or(np.array(fst_values), np.array(snd_values)).tolist()\n\n def eval_selector_not(self, sel: SelectorNot,\n xs: Sequence[Value]) -> SelectorValue:\n values = self.evaluate(sel.inner, xs)\n return np.logical_not(np.array(values)).tolist()\n\n\ndef _get_selected(\n selector_row: List[bool],\n values: Sequence[VT],\n) -> Sequence[VT]:\n \"\"\"Helper for aggregate. 
[T T F], [a b c] -> [a b].\"\"\"\n return [v for s, v in zip(selector_row, values) if s]\n\n\ndef _mean(xs: Sequence[VT], default: VT) -> VT:\n \"\"\"Takes the mean for numbers and concats for strings.\"\"\"", "metadata": {"task_id": "deepmind--tracr/35", "ground_truth": " if not xs:\n return default\n exemplar = xs[0]\n if isinstance(exemplar, (int, bool)):\n return sum(xs) / len(xs)\n elif len(xs) == 1:\n return exemplar\n else:\n raise ValueError(f\"Unsupported type for aggregation: {xs}\")\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp.py"], "context_start_lineno": 529, "lineno": 917, "function_name": "_mean"}, "groundtruth": " if not xs:\n return default\n exemplar = xs[0]\n if isinstance(exemplar, (int, bool)):\n return sum(xs) / len(xs)\n elif len(xs) == 1:\n return exemplar\n else:\n raise ValueError(f\"Unsupported type for aggregation: {xs}\")\n"} +{"prompt": " + 1)([0, 1, 2]),\n [1, 2, 3],\n )\n self.assertEqual(\n (1 + rasp.tokens)([0, 1, 2]),\n [1, 2, 3],\n )\n\n def test_dunders_with_sop(self):\n self.assertEqual(\n (rasp.tokens + rasp.indices)([0, 1, 2]),\n [0, 2, 4],\n )\n self.assertEqual(\n (rasp.length - 1 - rasp.indices)([0, 1, 2]),\n [2, 1, 0],\n )\n self.assertEqual(\n (rasp.length * rasp.length)([0, 1, 2]),\n [9, 9, 9],\n )\n\n def test_logical_dunders(self):\n self.assertEqual(\n (rasp.tokens & True)([True, False]),\n [True, False],\n )\n self.assertEqual(\n (rasp.tokens & False)([True, False]),\n [False, False],\n )\n self.assertEqual(\n (rasp.tokens | True)([True, False]),\n [True, True],\n )\n self.assertEqual(\n (rasp.tokens | False)([True, False]),\n [True, False],\n )\n self.assertEqual(\n (True & rasp.tokens)([True, False]),\n [True, False],\n )\n self.assertEqual(\n (False & rasp.tokens)([True, False]),\n [False, False],\n )\n self.assertEqual(\n (True | rasp.tokens)([True, False]),\n [True, True],\n )\n self.assertEqual(\n (False | rasp.tokens)([True, False]),\n [True, False],\n )\n\n self.assertEqual(\n (~rasp.tokens)([True, False]),\n [False, True],\n )\n\n self.assertEqual(\n (rasp.ConstantSOp([True, True, False, False])\n & rasp.ConstantSOp([True, False, True, False]))([1, 1, 1, 1]),\n [True, False, False, False],\n )\n\n self.assertEqual(\n (rasp.ConstantSOp([True, True, False, False])\n | rasp.ConstantSOp([True, False, True, False]))([1, 1, 1, 1]),\n [True, True, True, False],\n )\n\n\nclass EncodingTest(parameterized.TestCase):\n \"\"\"Tests for SOp encodings.\"\"\"\n\n @parameterized.named_parameters(*_SOP_EXAMPLES())\n def test_all_sops_are_categorical_by_default(self, sop: rasp.SOp):\n self.assertTrue(rasp.is_categorical(sop))\n\n @parameterized.named_parameters(*_SOP_EXAMPLES())\n def test_is_numerical(self, sop: rasp.SOp):\n self.assertTrue(rasp.is_numerical(rasp.numerical(sop)))\n self.assertFalse(rasp.is_numerical(rasp.categorical(sop)))\n\n @parameterized.named_parameters(*_SOP_EXAMPLES())\n def test_is_categorical(self, sop: rasp.SOp):\n self.assertTrue(rasp.is_categorical(rasp.categorical(sop)))\n self.assertFalse(rasp.is_categorical(rasp.numerical(sop)))\n\n @parameterized.named_parameters(*_SOP_EXAMPLES())\n def test_double_encoding_annotations_overwrites_encoding(self, sop: rasp.SOp):\n num_sop = rasp.numerical(sop)\n cat_num_sop = rasp.categorical(num_sop)\n self.assertTrue(rasp.is_numerical(num_sop))\n self.assertTrue(rasp.is_categorical(cat_num_sop))\n\n\nclass SelectorTest(parameterized.TestCase):\n \"\"\"Tests for Selectors.\"\"\"\n\n def test_select_eq_has_correct_value(self):\n selector = rasp.Select(rasp.tokens, 
rasp.tokens, rasp.Comparison.EQ)\n self.assertEqual(\n selector(\"hey\"), [\n [True, False, False],\n [False, True, False],\n [False, False, True],\n ])\n\n def test_select_lt_has_correct_value(self):\n selector = rasp.Select(rasp.tokens, rasp.tokens, rasp.Comparison.LT)\n self.assertEqual(selector([0, 1]), [\n [False, False],\n [True, False],\n ])\n\n def test_select_leq_has_correct_value(self):\n selector = rasp.Select(rasp.tokens, rasp.tokens, rasp.Comparison.LEQ)\n self.assertEqual(selector([0, 1]), [\n [True, False],\n [True, True],\n ])\n\n def test_select_gt_has_correct_value(self):\n selector = rasp.Select(rasp.tokens, rasp.tokens, rasp.Comparison.GT)\n self.assertEqual(selector([0, 1]), [\n [False, True],\n [False, False],\n ])\n\n def test_select_geq_has_correct_value(self):\n selector = rasp.Select(rasp.tokens, rasp.tokens, rasp.Comparison.GEQ)\n self.assertEqual(selector([0, 1]), [\n [True, True],\n [False, True],\n ])\n\n def test_select_neq_has_correct_value(self):\n selector = rasp.Select(rasp.tokens, rasp.tokens, rasp.Comparison.NEQ)\n self.assertEqual(selector([0, 1]), [\n [False, True],\n [True, False],\n ])\n\n def test_select_true_has_correct_value(self):\n selector = rasp.Select(rasp.tokens, rasp.tokens, rasp.Comparison.TRUE)\n self.assertEqual(selector([0, 1]), [\n [True, True],\n [True, True],\n ])\n\n def test_select_false_has_correct_value(self):\n selector = rasp.Select(rasp.tokens, rasp.tokens, rasp.Comparison.FALSE)\n self.assertEqual(selector([0, 1]), [\n [False, False],\n [False, False],\n ])\n\n def test_selector_and_gets_simplified_when_keys_and_queries_match(self):\n selector = rasp.selector_and(\n rasp.Select(rasp.tokens, rasp.indices, rasp.Comparison.GEQ),\n rasp.Select(rasp.tokens, rasp.indices, rasp.Comparison.LEQ),\n )\n self.assertIsInstance(selector, rasp.Select)\n self.assertIs(selector.keys, rasp.tokens)\n self.assertIs(selector.queries, rasp.indices)\n\n def test_selector_and_doesnt_get_simplified_when_keys_queries_different(self):\n selector = rasp.selector_and(\n rasp.Select(rasp.tokens, rasp.indices, rasp.Comparison.GEQ),\n rasp.Select(rasp.indices, rasp.tokens, rasp.Comparison.LEQ),\n )\n self.assertIsInstance(selector, rasp.SelectorAnd)\n\n def test_selector_and_gets_simplified_when_keys_are_full(self):\n selector = rasp.selector_and(\n rasp.Select(rasp.Full(1), rasp.indices, rasp.Comparison.GEQ),\n rasp.Select(rasp.tokens, rasp.indices, rasp.Comparison.LEQ),\n )\n self.assertIsInstance(selector, rasp.Select)\n self.assertIs(selector.keys, rasp.tokens)\n self.assertIs(selector.queries, rasp.indices)\n\n def test_selector_and_gets_simplified_when_queries_are_full(self):\n selector = rasp.selector_and(\n rasp.Select(rasp.tokens, rasp.indices, rasp.Comparison.GEQ),\n rasp.Select(rasp.tokens, rasp.Full(1), rasp.Comparison.LEQ),\n )\n self.assertIsInstance(selector, rasp.Select)\n self.assertIs(selector.keys, rasp.tokens)\n self.assertIs(selector.queries, rasp.indices)\n\n @parameterized.parameters(\n itertools.product(\n (rasp.tokens, rasp.indices, rasp.Full(1)),\n (rasp.tokens, rasp.indices, rasp.Full(1)),\n list(rasp.Comparison),\n (rasp.tokens, rasp.indices, rasp.Full(1)),\n (rasp.tokens, rasp.indices, rasp.Full(1)),\n list(rasp.Comparison),\n ))\n def test_simplified_selector_and_works_the_same_way_as_not(\n self, fst_k, fst_q, fst_p, snd_k, snd_q, snd_p):\n fst = rasp.Select(fst_k, fst_q, fst_p)\n snd = rasp.Select(snd_k, snd_q, snd_p)\n\n simplified = rasp.selector_and(fst, snd)([0, 1, 2, 3])\n not_simplified = rasp.selector_and(fst, snd, 
simplify=False)([0, 1, 2, 3])\n\n np.testing.assert_array_equal(\n np.array(simplified),\n np.array(not_simplified),\n )\n\n def test_select_is_selector(self):\n self.assertIsInstance(\n rasp.Select(rasp.tokens, rasp.tokens, rasp.Comparison.EQ),\n rasp.Selector,\n )\n\n def test_select_is_raspexpr(self):\n self.assertIsInstance(\n rasp.Select(rasp.tokens, rasp.tokens, rasp.Comparison.EQ),\n rasp.RASPExpr,\n )\n\n def test_constant_selector(self):\n self.assertEqual(\n rasp.ConstantSelector([[True, True], [False, False]])([1, 2]),\n [[True, True], [False, False]],\n )\n\n\nclass CopyTest(parameterized.TestCase):\n\n @parameterized.named_parameters(*_ALL_EXAMPLES())\n def test_copy_preserves_name(self, expr: rasp.RASPExpr):\n expr = expr.named(\"foo\")\n self.assertEqual(expr.copy().name, expr.name)\n\n @parameterized.named_parameters(*_ALL_EXAMPLES())\n def test_renaming_copy_doesnt_rename_original(self, expr: rasp.RASPExpr):\n expr = expr.named(\"foo\")\n expr.copy().named(\"bar\")\n self.assertEqual(expr.name, \"foo\")\n\n @parameterized.named_parameters(*_ALL_EXAMPLES())\n def test_renaming_original_doesnt_rename_copy(self, expr: rasp.RASPExpr):\n expr = expr.named(\"foo\")\n copy = expr.copy()\n expr.named(\"bar\")\n self.assertEqual(copy.name, \"foo\")\n\n @parameterized.named_parameters(*_ALL_EXAMPLES())\n def test_copy_changes_id(self, expr: rasp.RASPExpr):\n self.assertNotEqual(expr.copy().unique_id, expr.unique_id)\n\n @parameterized.named_parameters(*_ALL_EXAMPLES())\n def test_copy_preserves_child_ids(self, expr: rasp.RASPExpr):\n copy_child_ids = [c.unique_id for c in expr.copy().children]\n child_ids = [c.unique_id for c in expr.children]\n for child_id, copy_child_id in zip(child_ids, copy_child_ids):\n self.assertEqual(child_id, copy_child_id)\n\n\nclass AggregateTest(parameterized.TestCase):\n \"\"\"Tests for Aggregate.\"\"\"\n\n @parameterized.parameters(\n dict(\n selector=rasp.ConstantSelector([\n [True, False],\n [False, True],\n ]),\n sop=rasp.ConstantSOp([\"h\", \"e\"]),\n default=None,\n expected_value=[\"h\", \"e\"],\n ),\n dict(\n selector=rasp.ConstantSelector([\n [False, True],\n [False, False],\n ]),\n sop=rasp.ConstantSOp([\"h\", \"e\"]),\n default=None,\n expected_value=[\"e\", None],\n ),\n dict(\n selector=rasp.ConstantSelector([\n [True, False],\n [False, False],\n ]),\n sop=rasp.ConstantSOp([\"h\", \"e\"]),\n default=None,\n expected_value=[\"h\", None],\n ),\n dict(\n selector=rasp.ConstantSelector([\n [True, True],\n [False, True],\n ]),\n sop=rasp.ConstantSOp([0, 1]),\n default=0,\n expected_value=[0.5, 1],\n ),\n dict(\n selector=rasp.ConstantSelector([\n [False, False],\n [True, True],\n ]),\n sop=rasp.ConstantSOp([0, 1]),\n default=0,\n expected_value=[0, 0.5],\n ),\n dict(\n selector=rasp.ConstantSelector([\n [False, False],\n [True, True],\n ]),\n sop=rasp.ConstantSOp([0, 1]),\n default=None,\n expected_value=[None, 0.5],\n ),\n )\n def test_aggregate_on_size_2_inputs(self, selector, sop, default,\n expected_value):\n # The 0, 0 input is ignored as it's overridden by the constant SOps.\n self.assertEqual(\n rasp.Aggregate(selector, sop, default)([0, 0]),\n expected_value,\n )\n\n\nclass RaspProgramTest(parameterized.TestCase):\n \"\"\"Each testcase implements and tests a RASP program.\"\"\"\n\n def test_has_prev(self):\n\n def has_prev(seq: rasp.SOp) -> rasp.SOp:", "metadata": {"task_id": "deepmind--tracr/36", "ground_truth": " prev_copy = rasp.SelectorAnd(\n rasp.Select(seq, seq, rasp.Comparison.EQ),\n rasp.Select(rasp.indices, rasp.indices, 
rasp.Comparison.LT),\n )\n return rasp.Aggregate(prev_copy, rasp.Full(1), default=0) > 0\n", "fpath_tuple": ["deepmind_tracr", "tracr", "rasp", "rasp_test.py"], "context_start_lineno": 215, "lineno": 556, "function_name": "has_prev"}, "groundtruth": " prev_copy = rasp.SelectorAnd(\n rasp.Select(seq, seq, rasp.Comparison.EQ),\n rasp.Select(rasp.indices, rasp.indices, rasp.Comparison.LT),\n )\n return rasp.Aggregate(prev_copy, rasp.Full(1), default=0) > 0\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Assemble weights of a transformer model from a craft residual stack.\"\"\"\n\nimport dataclasses\nfrom typing import Any, Callable, Optional, List, Tuple\n\nimport chex\nimport einops\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\nfrom tracr.transformer import encoder\nfrom tracr.transformer import model\nfrom typing_extensions import Protocol\n\n\n@chex.dataclass\nclass AssembledTransformerModelOutput:\n decoded: List[Any] # length T.\n unembedded: jax.Array # [B, T] B = 1 always.\n layer_outputs: List[jax.Array] # [B, T, D]\n residuals: List[jax.Array] # [B, T, D]\n attn_logits: List[jax.Array] # [B, T, T, H]\n transformer_output: jax.Array # [B, T, D]\n input_embeddings: jax.Array\n\n\nclass ModelForward(Protocol):\n\n def __call__(\n self,\n params: hk.Params,\n emb: jax.Array,\n ) -> model.CompiledTransformerModelOutput:\n \"\"\"A hk-transformed forward pass through the compiled model.\"\"\"\n\n\n@dataclasses.dataclass\nclass AssembledTransformerModel:\n \"\"\"Model architecture and parameters from assembling a model.\"\"\"\n forward: ModelForward\n get_compiled_model: Callable[[], model.CompiledTransformerModel]\n params: hk.Params\n model_config: model.TransformerConfig\n residual_labels: List[str]\n input_encoder: Optional[encoder.Encoder] = None\n output_encoder: Optional[encoder.Encoder] = None\n\n def apply(self, tokens: List[bases.Value]) -> AssembledTransformerModelOutput:\n \"\"\"Returns output from running the model on a set of input tokens.\"\"\"", "metadata": {"task_id": "deepmind--tracr/37", "ground_truth": " if self.input_encoder:\n tokens = self.input_encoder.encode(tokens)\n tokens = jnp.array([tokens])\n output = self.forward(self.params, tokens)\n decoded = output.unembedded_output[0].tolist()\n if self.output_encoder:\n decoded = self.output_encoder.decode(decoded)\n\n if self.input_encoder.bos_token:\n # Special case for decoding the bos token position, for which the output\n # decoder might have unspecified behavior.\n decoded = [self.input_encoder.bos_token] + decoded[1:]\n\n return AssembledTransformerModelOutput(\n decoded=decoded,\n unembedded=output.unembedded_output,\n layer_outputs=output.transformer_output.layer_outputs,\n 
residuals=output.transformer_output.residuals,\n attn_logits=output.transformer_output.attn_logits,\n transformer_output=output.transformer_output.output,\n input_embeddings=output.transformer_output.input_embeddings)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "assemble.py"], "context_start_lineno": 0, "lineno": 67, "function_name": "apply"}, "groundtruth": " if self.input_encoder:\n tokens = self.input_encoder.encode(tokens)\n tokens = jnp.array([tokens])\n output = self.forward(self.params, tokens)\n decoded = output.unembedded_output[0].tolist()\n if self.output_encoder:\n decoded = self.output_encoder.decode(decoded)\n\n if self.input_encoder.bos_token:\n # Special case for decoding the bos token position, for which the output\n # decoder might have unspecified behavior.\n decoded = [self.input_encoder.bos_token] + decoded[1:]\n\n return AssembledTransformerModelOutput(\n decoded=decoded,\n unembedded=output.unembedded_output,\n layer_outputs=output.transformer_output.layer_outputs,\n residuals=output.transformer_output.residuals,\n attn_logits=output.transformer_output.attn_logits,\n transformer_output=output.transformer_output.output,\n input_embeddings=output.transformer_output.input_embeddings)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Assemble weights of a transformer model from a craft residual stack.\"\"\"\n\nimport dataclasses\nfrom typing import Any, Callable, Optional, List, Tuple\n\nimport chex\nimport einops\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\nfrom tracr.transformer import encoder\nfrom tracr.transformer import model\nfrom typing_extensions import Protocol\n\n\n@chex.dataclass\nclass AssembledTransformerModelOutput:\n decoded: List[Any] # length T.\n unembedded: jax.Array # [B, T] B = 1 always.\n layer_outputs: List[jax.Array] # [B, T, D]\n residuals: List[jax.Array] # [B, T, D]\n attn_logits: List[jax.Array] # [B, T, T, H]\n transformer_output: jax.Array # [B, T, D]\n input_embeddings: jax.Array\n\n\nclass ModelForward(Protocol):\n\n def __call__(\n self,\n params: hk.Params,\n emb: jax.Array,\n ) -> model.CompiledTransformerModelOutput:\n \"\"\"A hk-transformed forward pass through the compiled model.\"\"\"\n\n\n@dataclasses.dataclass\nclass AssembledTransformerModel:\n \"\"\"Model architecture and parameters from assembling a model.\"\"\"\n forward: ModelForward\n get_compiled_model: Callable[[], model.CompiledTransformerModel]\n params: hk.Params\n model_config: model.TransformerConfig\n residual_labels: List[str]\n input_encoder: Optional[encoder.Encoder] = None\n output_encoder: Optional[encoder.Encoder] = None\n\n def apply(self, tokens: List[bases.Value]) -> AssembledTransformerModelOutput:\n \"\"\"Returns output from 
running the model on a set of input tokens.\"\"\"\n if self.input_encoder:\n tokens = self.input_encoder.encode(tokens)\n tokens = jnp.array([tokens])\n output = self.forward(self.params, tokens)\n decoded = output.unembedded_output[0].tolist()\n if self.output_encoder:\n decoded = self.output_encoder.decode(decoded)\n\n if self.input_encoder.bos_token:\n # Special case for decoding the bos token position, for which the output\n # decoder might have unspecified behavior.\n decoded = [self.input_encoder.bos_token] + decoded[1:]\n\n return AssembledTransformerModelOutput(\n decoded=decoded,\n unembedded=output.unembedded_output,\n layer_outputs=output.transformer_output.layer_outputs,\n residuals=output.transformer_output.residuals,\n attn_logits=output.transformer_output.attn_logits,\n transformer_output=output.transformer_output.output,\n input_embeddings=output.transformer_output.input_embeddings)\n\n\n@dataclasses.dataclass\nclass EmbeddingModules:\n \"\"\"Modules for embedding and tokens and positions and unembedding results.\"\"\"\n token_embed: model.CallableHaikuModule\n pos_embed: model.CallableHaikuModule\n unembed: model.CallableHaikuModule\n\n\ndef _get_model_config_and_module_names(\n craft_model: transformers.SeriesWithResiduals\n) -> Tuple[model.TransformerConfig, List[str]]:\n \"\"\"Returns model config and locations (in params) for halflayers.\"\"\"\n\n multi_attn_heads: List[List[transformers.AttentionHead]] = []\n mlps: List[transformers.MLP] = []\n module_names: List[str] = []\n\n candidate_module_names = []\n for layer in range(len(craft_model.blocks)):\n candidate_module_names.append(f\"transformer/layer_{layer}/attn\")\n candidate_module_names.append(f\"transformer/layer_{layer}/mlp\")\n candidate_module_names = iter(candidate_module_names)\n\n for module in craft_model.blocks:\n if isinstance(module, transformers.MLP):\n mlps.append(module)\n layer_type = \"mlp\"\n else:\n multi_attn_heads.append(list(module.as_multi().heads()))\n layer_type = \"attn\"\n # Find next layer with the necessary type. 
Modules in-between, that are not\n # added to module_names will be disabled later by setting all weights to 0.\n module_name = next(candidate_module_names)\n while layer_type not in module_name:\n module_name = next(candidate_module_names)\n module_names.append(module_name)\n\n num_layers = int(module_names[-1].split(\"_\")[1].split(\"/\")[0]) + 1\n heads = sum(multi_attn_heads, [])\n\n if multi_attn_heads:\n num_heads = max(len(heads) for heads in multi_attn_heads)\n key_size = max(max(head.w_qk.matrix.shape) for head in heads)\n else:\n num_heads, key_size = 1, 1\n\n if mlps:\n mlp_hidden_size = max(mlp.fst.output_space.num_dims for mlp in mlps)\n else:\n mlp_hidden_size = 1\n\n model_config = model.TransformerConfig(\n num_heads=num_heads,\n num_layers=num_layers,\n key_size=key_size,\n mlp_hidden_size=mlp_hidden_size,\n dropout_rate=0.,\n activation_function=jax.nn.relu,\n layer_norm=False,\n causal=False,\n )\n\n return model_config, module_names\n\n\ndef _make_embedding_modules(\n residual_space: bases.VectorSpaceWithBasis,\n tokens_space: bases.VectorSpaceWithBasis,\n indices_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis) -> EmbeddingModules:\n \"\"\"Creates embedding and unembedding modules from vector spaces.\n\n Args:\n residual_space: Full residual space of the model.\n tokens_space: Subspace to embed tokens to.\n indices_space: Subspace to embed indices/position embeddings to.\n output_space: Subspace to unembed outputs from.\n\n Returns:\n EmbeddingModules containing modules for token embeddings, position\n embeddings and unembeddings.\n \"\"\"\n tokens_to_res = vectorspace_fns.project(tokens_space, residual_space)\n\n # If we use the 'one' direction, make sure all inputs have a 1 here\n one_dir = bases.BasisDirection(\"one\")\n if one_dir in residual_space:\n one_to_res = vectorspace_fns.Linear.from_action(\n tokens_space, residual_space,\n lambda x: residual_space.vector_from_basis_direction(one_dir))\n tokens_to_res = vectorspace_fns.Linear.combine_in_parallel(\n [tokens_to_res, one_to_res])\n\n # Token embeddings.\n res_to_out = vectorspace_fns.project(residual_space, output_space)\n token_embed = hk.Embed(\n embedding_matrix=tokens_to_res.matrix, name=\"token_embed\")\n\n # Positional embeddings.\n index_to_res = vectorspace_fns.project(indices_space, residual_space)\n # The zeroth position should not have any positional embeddings,\n # so we add one line of padding at the zeroth position.\n pos_matrix = np.concatenate(\n [np.zeros((1, residual_space.num_dims)), index_to_res.matrix], axis=0)\n pos_embed = hk.Embed(embedding_matrix=pos_matrix, name=\"pos_embed\")\n\n def unembed(x, use_unembed_argmax):", "metadata": {"task_id": "deepmind--tracr/38", "ground_truth": " out = x @ res_to_out.matrix\n if use_unembed_argmax:\n return jnp.argmax(out, axis=-1)\n elif out.shape[-1] == 1:\n return out.squeeze(-1)\n return out\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "assemble.py"], "context_start_lineno": 0, "lineno": 197, "function_name": "unembed"}, "groundtruth": " out = x @ res_to_out.matrix\n if use_unembed_argmax:\n return jnp.argmax(out, axis=-1)\n elif out.shape[-1] == 1:\n return out.squeeze(-1)\n return out\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Assemble weights of a transformer model from a craft residual stack.\"\"\"\n\nimport dataclasses\nfrom typing import Any, Callable, Optional, List, Tuple\n\nimport chex\nimport einops\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\nfrom tracr.transformer import encoder\nfrom tracr.transformer import model\nfrom typing_extensions import Protocol\n\n\n@chex.dataclass\nclass AssembledTransformerModelOutput:\n decoded: List[Any] # length T.\n unembedded: jax.Array # [B, T] B = 1 always.\n layer_outputs: List[jax.Array] # [B, T, D]\n residuals: List[jax.Array] # [B, T, D]\n attn_logits: List[jax.Array] # [B, T, T, H]\n transformer_output: jax.Array # [B, T, D]\n input_embeddings: jax.Array\n\n\nclass ModelForward(Protocol):\n\n def __call__(\n self,\n params: hk.Params,\n emb: jax.Array,\n ) -> model.CompiledTransformerModelOutput:\n \"\"\"A hk-transformed forward pass through the compiled model.\"\"\"\n\n\n@dataclasses.dataclass\nclass AssembledTransformerModel:\n \"\"\"Model architecture and parameters from assembling a model.\"\"\"\n forward: ModelForward\n get_compiled_model: Callable[[], model.CompiledTransformerModel]\n params: hk.Params\n model_config: model.TransformerConfig\n residual_labels: List[str]\n input_encoder: Optional[encoder.Encoder] = None\n output_encoder: Optional[encoder.Encoder] = None\n\n def apply(self, tokens: List[bases.Value]) -> AssembledTransformerModelOutput:\n \"\"\"Returns output from running the model on a set of input tokens.\"\"\"\n if self.input_encoder:\n tokens = self.input_encoder.encode(tokens)\n tokens = jnp.array([tokens])\n output = self.forward(self.params, tokens)\n decoded = output.unembedded_output[0].tolist()\n if self.output_encoder:\n decoded = self.output_encoder.decode(decoded)\n\n if self.input_encoder.bos_token:\n # Special case for decoding the bos token position, for which the output\n # decoder might have unspecified behavior.\n decoded = [self.input_encoder.bos_token] + decoded[1:]\n\n return AssembledTransformerModelOutput(\n decoded=decoded,\n unembedded=output.unembedded_output,\n layer_outputs=output.transformer_output.layer_outputs,\n residuals=output.transformer_output.residuals,\n attn_logits=output.transformer_output.attn_logits,\n transformer_output=output.transformer_output.output,\n input_embeddings=output.transformer_output.input_embeddings)\n\n\n@dataclasses.dataclass\nclass EmbeddingModules:\n \"\"\"Modules for embedding and tokens and positions and unembedding results.\"\"\"\n token_embed: model.CallableHaikuModule\n pos_embed: model.CallableHaikuModule\n unembed: model.CallableHaikuModule\n\n\ndef _get_model_config_and_module_names(\n craft_model: transformers.SeriesWithResiduals\n) -> Tuple[model.TransformerConfig, 
List[str]]:\n \"\"\"Returns model config and locations (in params) for halflayers.\"\"\"\n\n multi_attn_heads: List[List[transformers.AttentionHead]] = []\n mlps: List[transformers.MLP] = []\n module_names: List[str] = []\n\n candidate_module_names = []\n for layer in range(len(craft_model.blocks)):\n candidate_module_names.append(f\"transformer/layer_{layer}/attn\")\n candidate_module_names.append(f\"transformer/layer_{layer}/mlp\")\n candidate_module_names = iter(candidate_module_names)\n\n for module in craft_model.blocks:\n if isinstance(module, transformers.MLP):\n mlps.append(module)\n layer_type = \"mlp\"\n else:\n multi_attn_heads.append(list(module.as_multi().heads()))\n layer_type = \"attn\"\n # Find next layer with the necessary type. Modules in-between, that are not\n # added to module_names will be disabled later by setting all weights to 0.\n module_name = next(candidate_module_names)\n while layer_type not in module_name:\n module_name = next(candidate_module_names)\n module_names.append(module_name)\n\n num_layers = int(module_names[-1].split(\"_\")[1].split(\"/\")[0]) + 1\n heads = sum(multi_attn_heads, [])\n\n if multi_attn_heads:\n num_heads = max(len(heads) for heads in multi_attn_heads)\n key_size = max(max(head.w_qk.matrix.shape) for head in heads)\n else:\n num_heads, key_size = 1, 1\n\n if mlps:\n mlp_hidden_size = max(mlp.fst.output_space.num_dims for mlp in mlps)\n else:\n mlp_hidden_size = 1\n\n model_config = model.TransformerConfig(\n num_heads=num_heads,\n num_layers=num_layers,\n key_size=key_size,\n mlp_hidden_size=mlp_hidden_size,\n dropout_rate=0.,\n activation_function=jax.nn.relu,\n layer_norm=False,\n causal=False,\n )\n\n return model_config, module_names\n\n\ndef _make_embedding_modules(\n residual_space: bases.VectorSpaceWithBasis,\n tokens_space: bases.VectorSpaceWithBasis,\n indices_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis) -> EmbeddingModules:\n \"\"\"Creates embedding and unembedding modules from vector spaces.\n\n Args:\n residual_space: Full residual space of the model.\n tokens_space: Subspace to embed tokens to.\n indices_space: Subspace to embed indices/position embeddings to.\n output_space: Subspace to unembed outputs from.\n\n Returns:\n EmbeddingModules containing modules for token embeddings, position\n embeddings and unembeddings.\n \"\"\"\n tokens_to_res = vectorspace_fns.project(tokens_space, residual_space)\n\n # If we use the 'one' direction, make sure all inputs have a 1 here\n one_dir = bases.BasisDirection(\"one\")\n if one_dir in residual_space:\n one_to_res = vectorspace_fns.Linear.from_action(\n tokens_space, residual_space,\n lambda x: residual_space.vector_from_basis_direction(one_dir))\n tokens_to_res = vectorspace_fns.Linear.combine_in_parallel(\n [tokens_to_res, one_to_res])\n\n # Token embeddings.\n res_to_out = vectorspace_fns.project(residual_space, output_space)\n token_embed = hk.Embed(\n embedding_matrix=tokens_to_res.matrix, name=\"token_embed\")\n\n # Positional embeddings.\n index_to_res = vectorspace_fns.project(indices_space, residual_space)\n # The zeroth position should not have any positional embeddings,\n # so we add one line of padding at the zeroth position.\n pos_matrix = np.concatenate(\n [np.zeros((1, residual_space.num_dims)), index_to_res.matrix], axis=0)\n pos_embed = hk.Embed(embedding_matrix=pos_matrix, name=\"pos_embed\")\n\n def unembed(x, use_unembed_argmax):\n out = x @ res_to_out.matrix\n if use_unembed_argmax:\n return jnp.argmax(out, axis=-1)\n elif 
out.shape[-1] == 1:\n return out.squeeze(-1)\n return out\n\n unembed_mod = hk.to_module(unembed)()\n return EmbeddingModules(\n token_embed=token_embed, pos_embed=pos_embed, unembed=unembed_mod)\n\n\ndef assemble_craft_model(\n craft_model: transformers.SeriesWithResiduals,\n tokens_space: bases.VectorSpaceWithBasis,\n indices_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n categorical_output: bool,\n causal: bool = False,\n) -> AssembledTransformerModel:\n \"\"\"Assembles the given components into a Haiku model with parameters.\n\n Args:\n craft_model: Model to assemble weights for.\n tokens_space: Vectorspace to embed the input tokens to.\n indices_space: Vectorspace to embed the indices to (position encodings).\n output_space: Vectorspace that the model will write outputs to that should\n be unembedded.\n categorical_output: Whether the output is categorical. If True, we take an\n argmax when unembedding.\n causal: Whether to output a causally-masked model.\n\n Returns:\n An AssembledTransformerModel that contains the model and parameters of the\n assembled transformer.\n \"\"\"\n # TODO(b/255936413): Make embeddings only retain the tokens and indices that\n # are actually used.\n # TODO(b/255936496): Think about enabling layer norm and reversing it somehow\n\n model_config, module_names = _get_model_config_and_module_names(craft_model)\n model_config.causal = causal\n\n residual_space = bases.join_vector_spaces(craft_model.residual_space,\n tokens_space, indices_space,\n output_space)\n residual_labels = [str(basis_dir) for basis_dir in residual_space.basis]\n\n # Build model with embedding and unembedding layers\n def get_compiled_model():", "metadata": {"task_id": "deepmind--tracr/39", "ground_truth": " transformer = model.Transformer(model_config)\n embed_modules = _make_embedding_modules(\n residual_space=residual_space,\n tokens_space=tokens_space,\n indices_space=indices_space,\n output_space=output_space)\n return model.CompiledTransformerModel(\n transformer=transformer,\n token_embed=embed_modules.token_embed,\n position_embed=embed_modules.pos_embed,\n unembed=embed_modules.unembed,\n use_unembed_argmax=categorical_output)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "assemble.py"], "context_start_lineno": 0, "lineno": 247, "function_name": "get_compiled_model"}, "groundtruth": " transformer = model.Transformer(model_config)\n embed_modules = _make_embedding_modules(\n residual_space=residual_space,\n tokens_space=tokens_space,\n indices_space=indices_space,\n output_space=output_space)\n return model.CompiledTransformerModel(\n transformer=transformer,\n token_embed=embed_modules.token_embed,\n position_embed=embed_modules.pos_embed,\n unembed=embed_modules.unembed,\n use_unembed_argmax=categorical_output)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Integration tests for the full RASP -> transformer compilation.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax\nimport numpy as np\n\nfrom tracr.compiler import compiling\nfrom tracr.compiler import lib\nfrom tracr.compiler import test_cases\nfrom tracr.craft import tests_common\nfrom tracr.rasp import rasp\n\n_COMPILER_BOS = \"rasp_to_transformer_integration_test_BOS\"\n_COMPILER_PAD = \"rasp_to_transformer_integration_test_PAD\"\n\n# Force float32 precision on TPU, which otherwise defaults to float16.\njax.config.update(\"jax_default_matmul_precision\", \"float32\")\n\n\nclass CompilerIntegrationTest(tests_common.VectorFnTestCase):\n\n def assertSequenceEqualWhenExpectedIsNotNone(self, actual_seq, expected_seq):", "metadata": {"task_id": "deepmind--tracr/40", "ground_truth": " for actual, expected in zip(actual_seq, expected_seq):\n if expected is not None and actual != expected:\n self.fail(f\"{actual_seq} does not match (ignoring Nones) \"\n f\"expected_seq={expected_seq}\")\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "rasp_to_transformer_integration_test.py"], "context_start_lineno": 0, "lineno": 37, "function_name": "assertSequenceEqualWhenExpectedIsNotNone"}, "groundtruth": " for actual, expected in zip(actual_seq, expected_seq):\n if expected is not None and actual != expected:\n self.fail(f\"{actual_seq} does not match (ignoring Nones) \"\n f\"expected_seq={expected_seq}\")\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for compiler.expr_to_craft_graph.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom tracr.compiler import basis_inference\nfrom tracr.compiler import expr_to_craft_graph\nfrom tracr.compiler import lib\nfrom tracr.compiler import nodes\nfrom tracr.compiler import rasp_to_graph\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.rasp import rasp\n\n\nclass ExprToCraftGraphTest(parameterized.TestCase):\n\n def _check_block_types_are_correct(self, graph):", "metadata": {"task_id": "deepmind--tracr/41", "ground_truth": " for _, node in graph.nodes.items():\n expr = node[nodes.EXPR]\n if isinstance(expr, rasp.SOp):\n block = node[nodes.MODEL_BLOCK]\n if isinstance(expr, (rasp.Map, rasp.SequenceMap)):\n self.assertIsInstance(block, transformers.MLP)\n elif isinstance(expr, rasp.Aggregate):\n self.assertIsInstance(block, transformers.AttentionHead)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "expr_to_craft_graph_test.py"], "context_start_lineno": 0, "lineno": 31, "function_name": "_check_block_types_are_correct"}, "groundtruth": " for _, node in graph.nodes.items():\n expr = node[nodes.EXPR]\n if isinstance(expr, rasp.SOp):\n block = node[nodes.MODEL_BLOCK]\n if isinstance(expr, (rasp.Map, rasp.SequenceMap)):\n self.assertIsInstance(block, transformers.MLP)\n elif isinstance(expr, rasp.Aggregate):\n self.assertIsInstance(block, transformers.AttentionHead)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for compiler.expr_to_craft_graph.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom tracr.compiler import basis_inference\nfrom tracr.compiler import expr_to_craft_graph\nfrom tracr.compiler import lib\nfrom tracr.compiler import nodes\nfrom tracr.compiler import rasp_to_graph\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.rasp import rasp\n\n\nclass ExprToCraftGraphTest(parameterized.TestCase):\n\n def _check_block_types_are_correct(self, graph):\n for _, node in graph.nodes.items():\n expr = node[nodes.EXPR]\n if isinstance(expr, rasp.SOp):\n block = node[nodes.MODEL_BLOCK]\n if isinstance(expr, (rasp.Map, rasp.SequenceMap)):\n self.assertIsInstance(block, transformers.MLP)\n elif isinstance(expr, rasp.Aggregate):\n self.assertIsInstance(block, transformers.AttentionHead)\n\n def _get_input_space_from_node(self, node):", "metadata": {"task_id": "deepmind--tracr/42", "ground_truth": " block = node[nodes.MODEL_BLOCK]\n if isinstance(block, transformers.MLP):\n return block.fst.input_space\n elif isinstance(block, transformers.AttentionHead):\n return bases.join_vector_spaces(block.w_qk.left_space,\n block.w_qk.right_space,\n block.w_ov.input_space)\n else:\n return None\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "expr_to_craft_graph_test.py"], "context_start_lineno": 0, "lineno": 41, "function_name": "_get_input_space_from_node"}, "groundtruth": " block = node[nodes.MODEL_BLOCK]\n if isinstance(block, transformers.MLP):\n return block.fst.input_space\n elif isinstance(block, transformers.AttentionHead):\n return bases.join_vector_spaces(block.w_qk.left_space,\n block.w_qk.right_space,\n block.w_ov.input_space)\n else:\n return None\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for compiler.expr_to_craft_graph.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom tracr.compiler import basis_inference\nfrom tracr.compiler import expr_to_craft_graph\nfrom tracr.compiler import lib\nfrom tracr.compiler import nodes\nfrom tracr.compiler import rasp_to_graph\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.rasp import rasp\n\n\nclass ExprToCraftGraphTest(parameterized.TestCase):\n\n def _check_block_types_are_correct(self, graph):\n for _, node in graph.nodes.items():\n expr = node[nodes.EXPR]\n if isinstance(expr, rasp.SOp):\n block = node[nodes.MODEL_BLOCK]\n if isinstance(expr, (rasp.Map, rasp.SequenceMap)):\n self.assertIsInstance(block, transformers.MLP)\n elif isinstance(expr, rasp.Aggregate):\n self.assertIsInstance(block, transformers.AttentionHead)\n\n def _get_input_space_from_node(self, node):\n block = node[nodes.MODEL_BLOCK]\n if isinstance(block, transformers.MLP):\n return block.fst.input_space\n elif isinstance(block, transformers.AttentionHead):\n return bases.join_vector_spaces(block.w_qk.left_space,\n block.w_qk.right_space,\n block.w_ov.input_space)\n else:\n return None\n\n def _check_spaces_are_consistent(self, graph):\n \"\"\"Check that for each edge the output is a subspace of the input.\"\"\"", "metadata": {"task_id": "deepmind--tracr/43", "ground_truth": " for u, v in graph.edges:\n u_node, v_node = graph.nodes[u], graph.nodes[v]\n if isinstance(u_node[nodes.EXPR], rasp.SOp) and isinstance(\n v_node[nodes.EXPR], rasp.SOp):\n u_out_basis = u_node[nodes.OUTPUT_BASIS]\n u_out_space = bases.VectorSpaceWithBasis(u_out_basis)\n v_in_space = self._get_input_space_from_node(v_node)\n self.assertTrue(u_out_space.issubspace(v_in_space))\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "expr_to_craft_graph_test.py"], "context_start_lineno": 0, "lineno": 53, "function_name": "_check_spaces_are_consistent"}, "groundtruth": " for u, v in graph.edges:\n u_node, v_node = graph.nodes[u], graph.nodes[v]\n if isinstance(u_node[nodes.EXPR], rasp.SOp) and isinstance(\n v_node[nodes.EXPR], rasp.SOp):\n u_out_basis = u_node[nodes.OUTPUT_BASIS]\n u_out_space = bases.VectorSpaceWithBasis(u_out_basis)\n v_in_space = self._get_input_space_from_node(v_node)\n self.assertTrue(u_out_space.issubspace(v_in_space))\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Create a craft model from a computational graph.\"\"\"\n\nimport collections\nfrom typing import Dict, List, Sequence\n\nimport networkx as nx\nfrom tracr.compiler import nodes\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.rasp import rasp\n\nNode = nodes.Node\nNodeID = nodes.NodeID\n\n\ndef _get_longest_path_length_to_node(graph: nx.DiGraph, sources: Sequence[Node],\n node: Node) -> int:\n \"\"\"Returns the lengths of the longest path from sources to node.\n\n Only SOps count towards the length of a path.\n\n Args:\n graph: DAG to compute longest path in.\n sources: List of starting nodes, longest path will be a maximum over all.\n node: Target node.\n\n Returns:\n Number of steps needed for the longest path from the source to the node, or\n -1 if there is no path from any of the sources to the target node.\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/44", "ground_truth": " if node in sources:\n return 0\n\n def num_sops(path: Sequence[NodeID]) -> int:\n num = 0\n for node_id in path:\n if isinstance(graph.nodes[node_id][nodes.EXPR], rasp.SOp):\n num += 1\n return num\n\n result = -1\n for source in sources:\n all_paths = nx.all_simple_paths(graph, source[nodes.ID], node[nodes.ID])\n longest_path_len = max(map(num_sops, all_paths), default=-1) - 1\n if longest_path_len > result:\n result = longest_path_len\n return result\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "craft_graph_to_model.py"], "context_start_lineno": 0, "lineno": 44, "function_name": "_get_longest_path_length_to_node"}, "groundtruth": " if node in sources:\n return 0\n\n def num_sops(path: Sequence[NodeID]) -> int:\n num = 0\n for node_id in path:\n if isinstance(graph.nodes[node_id][nodes.EXPR], rasp.SOp):\n num += 1\n return num\n\n result = -1\n for source in sources:\n all_paths = nx.all_simple_paths(graph, source[nodes.ID], node[nodes.ID])\n longest_path_len = max(map(num_sops, all_paths), default=-1) - 1\n if longest_path_len > result:\n result = longest_path_len\n return result\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Create a craft model from a computational graph.\"\"\"\n\nimport collections\nfrom typing import Dict, List, Sequence\n\nimport networkx as nx\nfrom tracr.compiler import nodes\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.rasp import rasp\n\nNode = nodes.Node\nNodeID = nodes.NodeID\n\n\ndef _get_longest_path_length_to_node(graph: nx.DiGraph, sources: Sequence[Node],\n node: Node) -> int:\n \"\"\"Returns the lengths of the longest path from sources to node.\n\n Only SOps count towards the length of a path.\n\n Args:\n graph: DAG to compute longest path in.\n sources: List of starting nodes, longest path will be a maximum over all.\n node: Target node.\n\n Returns:\n Number of steps needed for the longest path from the source to the node, or\n -1 if there is no path from any of the sources to the target node.\n \"\"\"\n if node in sources:\n return 0\n\n def num_sops(path: Sequence[NodeID]) -> int:", "metadata": {"task_id": "deepmind--tracr/45", "ground_truth": " num = 0\n for node_id in path:\n if isinstance(graph.nodes[node_id][nodes.EXPR], rasp.SOp):\n num += 1\n return num\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "craft_graph_to_model.py"], "context_start_lineno": 0, "lineno": 48, "function_name": "num_sops"}, "groundtruth": " num = 0\n for node_id in path:\n if isinstance(graph.nodes[node_id][nodes.EXPR], rasp.SOp):\n num += 1\n return num\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Create a craft model from a computational graph.\"\"\"\n\nimport collections\nfrom typing import Dict, List, Sequence\n\nimport networkx as nx\nfrom tracr.compiler import nodes\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.rasp import rasp\n\nNode = nodes.Node\nNodeID = nodes.NodeID\n\n\ndef _get_longest_path_length_to_node(graph: nx.DiGraph, sources: Sequence[Node],\n node: Node) -> int:\n \"\"\"Returns the lengths of the longest path from sources to node.\n\n Only SOps count towards the length of a path.\n\n Args:\n graph: DAG to compute longest path in.\n sources: List of starting nodes, longest path will be a maximum over all.\n node: Target node.\n\n Returns:\n Number of steps needed for the longest path from the source to the node, or\n -1 if there is no path from any of the sources to the target node.\n \"\"\"\n if node in sources:\n return 0\n\n def num_sops(path: Sequence[NodeID]) -> int:\n num = 0\n for node_id in path:\n if isinstance(graph.nodes[node_id][nodes.EXPR], rasp.SOp):\n num += 1\n return num\n\n result = -1\n for source in sources:\n all_paths = nx.all_simple_paths(graph, source[nodes.ID], node[nodes.ID])\n longest_path_len = max(map(num_sops, all_paths), default=-1) - 1\n if longest_path_len > result:\n result = longest_path_len\n return result\n\n\ndef _node_is_attn(node: Node) -> bool:\n \"\"\"Returns True if node is an attention layer.\"\"\"", "metadata": {"task_id": "deepmind--tracr/46", "ground_truth": " return nodes.MODEL_BLOCK in node and isinstance(\n node[nodes.MODEL_BLOCK],\n (transformers.AttentionHead, transformers.MultiAttentionHead))\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "craft_graph_to_model.py"], "context_start_lineno": 0, "lineno": 65, "function_name": "_node_is_attn"}, "groundtruth": " return nodes.MODEL_BLOCK in node and isinstance(\n node[nodes.MODEL_BLOCK],\n (transformers.AttentionHead, transformers.MultiAttentionHead))\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Create a craft model from a computational graph.\"\"\"\n\nimport collections\nfrom typing import Dict, List, Sequence\n\nimport networkx as nx\nfrom tracr.compiler import nodes\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.rasp import rasp\n\nNode = nodes.Node\nNodeID = nodes.NodeID\n\n\ndef _get_longest_path_length_to_node(graph: nx.DiGraph, sources: Sequence[Node],\n node: Node) -> int:\n \"\"\"Returns the lengths of the longest path from sources to node.\n\n Only SOps count towards the length of a path.\n\n Args:\n graph: DAG to compute longest path in.\n sources: List of starting nodes, longest path will be a maximum over all.\n node: Target node.\n\n Returns:\n Number of steps needed for the longest path from the source to the node, or\n -1 if there is no path from any of the sources to the target node.\n \"\"\"\n if node in sources:\n return 0\n\n def num_sops(path: Sequence[NodeID]) -> int:\n num = 0\n for node_id in path:\n if isinstance(graph.nodes[node_id][nodes.EXPR], rasp.SOp):\n num += 1\n return num\n\n result = -1\n for source in sources:\n all_paths = nx.all_simple_paths(graph, source[nodes.ID], node[nodes.ID])\n longest_path_len = max(map(num_sops, all_paths), default=-1) - 1\n if longest_path_len > result:\n result = longest_path_len\n return result\n\n\ndef _node_is_attn(node: Node) -> bool:\n \"\"\"Returns True if node is an attention layer.\"\"\"\n return nodes.MODEL_BLOCK in node and isinstance(\n node[nodes.MODEL_BLOCK],\n (transformers.AttentionHead, transformers.MultiAttentionHead))\n\n\ndef _node_is_mlp(node: Node) -> bool:\n \"\"\"Returns True if node is an MLP layer.\"\"\"\n return nodes.MODEL_BLOCK in node and isinstance(node[nodes.MODEL_BLOCK],\n transformers.MLP)\n\n\ndef _node_is_residual_block(node: Node) -> bool:\n \"\"\"Returns True if node is a valid residual block (Attn followed by MLP).\"\"\"", "metadata": {"task_id": "deepmind--tracr/47", "ground_truth": " block = node[nodes.MODEL_BLOCK] if nodes.MODEL_BLOCK in node else None\n if block and isinstance(block, transformers.SeriesWithResiduals):\n if len(block.blocks) == 2:\n attn, mlp = block.blocks\n if (isinstance(\n attn,\n (transformers.AttentionHead, transformers.MultiAttentionHead)) and\n isinstance(mlp, transformers.MLP)):\n return True\n return False\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "craft_graph_to_model.py"], "context_start_lineno": 0, "lineno": 78, "function_name": "_node_is_residual_block"}, "groundtruth": " block = node[nodes.MODEL_BLOCK] if nodes.MODEL_BLOCK in node else None\n if block and isinstance(block, transformers.SeriesWithResiduals):\n if len(block.blocks) == 2:\n attn, mlp = block.blocks\n if (isinstance(\n attn,\n (transformers.AttentionHead, transformers.MultiAttentionHead)) and\n isinstance(mlp, transformers.MLP)):\n return True\n return False\n"} +{"prompt": "# 
Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Create a craft model from a computational graph.\"\"\"\n\nimport collections\nfrom typing import Dict, List, Sequence\n\nimport networkx as nx\nfrom tracr.compiler import nodes\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.rasp import rasp\n\nNode = nodes.Node\nNodeID = nodes.NodeID\n\n\ndef _get_longest_path_length_to_node(graph: nx.DiGraph, sources: Sequence[Node],\n node: Node) -> int:\n \"\"\"Returns the lengths of the longest path from sources to node.\n\n Only SOps count towards the length of a path.\n\n Args:\n graph: DAG to compute longest path in.\n sources: List of starting nodes, longest path will be a maximum over all.\n node: Target node.\n\n Returns:\n Number of steps needed for the longest path from the source to the node, or\n -1 if there is no path from any of the sources to the target node.\n \"\"\"\n if node in sources:\n return 0\n\n def num_sops(path: Sequence[NodeID]) -> int:\n num = 0\n for node_id in path:\n if isinstance(graph.nodes[node_id][nodes.EXPR], rasp.SOp):\n num += 1\n return num\n\n result = -1\n for source in sources:\n all_paths = nx.all_simple_paths(graph, source[nodes.ID], node[nodes.ID])\n longest_path_len = max(map(num_sops, all_paths), default=-1) - 1\n if longest_path_len > result:\n result = longest_path_len\n return result\n\n\ndef _node_is_attn(node: Node) -> bool:\n \"\"\"Returns True if node is an attention layer.\"\"\"\n return nodes.MODEL_BLOCK in node and isinstance(\n node[nodes.MODEL_BLOCK],\n (transformers.AttentionHead, transformers.MultiAttentionHead))\n\n\ndef _node_is_mlp(node: Node) -> bool:\n \"\"\"Returns True if node is an MLP layer.\"\"\"\n return nodes.MODEL_BLOCK in node and isinstance(node[nodes.MODEL_BLOCK],\n transformers.MLP)\n\n\ndef _node_is_residual_block(node: Node) -> bool:\n \"\"\"Returns True if node is a valid residual block (Attn followed by MLP).\"\"\"\n block = node[nodes.MODEL_BLOCK] if nodes.MODEL_BLOCK in node else None\n if block and isinstance(block, transformers.SeriesWithResiduals):\n if len(block.blocks) == 2:\n attn, mlp = block.blocks\n if (isinstance(\n attn,\n (transformers.AttentionHead, transformers.MultiAttentionHead)) and\n isinstance(mlp, transformers.MLP)):\n return True\n return False\n\n\ndef _all_attn_nodes(node_list: Sequence[Node]) -> bool:\n \"\"\"Returns True iff all nodes are attention layers (or nodes is empty).\"\"\"", "metadata": {"task_id": "deepmind--tracr/48", "ground_truth": " for node in node_list:\n if not _node_is_attn(node):\n return False\n return True\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "craft_graph_to_model.py"], "context_start_lineno": 0, "lineno": 92, "function_name": "_all_attn_nodes"}, "groundtruth": " for node in node_list:\n if not _node_is_attn(node):\n return False\n return True\n"} +{"prompt": "# Copyright 
2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Create a craft model from a computational graph.\"\"\"\n\nimport collections\nfrom typing import Dict, List, Sequence\n\nimport networkx as nx\nfrom tracr.compiler import nodes\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.rasp import rasp\n\nNode = nodes.Node\nNodeID = nodes.NodeID\n\n\ndef _get_longest_path_length_to_node(graph: nx.DiGraph, sources: Sequence[Node],\n node: Node) -> int:\n \"\"\"Returns the lengths of the longest path from sources to node.\n\n Only SOps count towards the length of a path.\n\n Args:\n graph: DAG to compute longest path in.\n sources: List of starting nodes, longest path will be a maximum over all.\n node: Target node.\n\n Returns:\n Number of steps needed for the longest path from the source to the node, or\n -1 if there is no path from any of the sources to the target node.\n \"\"\"\n if node in sources:\n return 0\n\n def num_sops(path: Sequence[NodeID]) -> int:\n num = 0\n for node_id in path:\n if isinstance(graph.nodes[node_id][nodes.EXPR], rasp.SOp):\n num += 1\n return num\n\n result = -1\n for source in sources:\n all_paths = nx.all_simple_paths(graph, source[nodes.ID], node[nodes.ID])\n longest_path_len = max(map(num_sops, all_paths), default=-1) - 1\n if longest_path_len > result:\n result = longest_path_len\n return result\n\n\ndef _node_is_attn(node: Node) -> bool:\n \"\"\"Returns True if node is an attention layer.\"\"\"\n return nodes.MODEL_BLOCK in node and isinstance(\n node[nodes.MODEL_BLOCK],\n (transformers.AttentionHead, transformers.MultiAttentionHead))\n\n\ndef _node_is_mlp(node: Node) -> bool:\n \"\"\"Returns True if node is an MLP layer.\"\"\"\n return nodes.MODEL_BLOCK in node and isinstance(node[nodes.MODEL_BLOCK],\n transformers.MLP)\n\n\ndef _node_is_residual_block(node: Node) -> bool:\n \"\"\"Returns True if node is a valid residual block (Attn followed by MLP).\"\"\"\n block = node[nodes.MODEL_BLOCK] if nodes.MODEL_BLOCK in node else None\n if block and isinstance(block, transformers.SeriesWithResiduals):\n if len(block.blocks) == 2:\n attn, mlp = block.blocks\n if (isinstance(\n attn,\n (transformers.AttentionHead, transformers.MultiAttentionHead)) and\n isinstance(mlp, transformers.MLP)):\n return True\n return False\n\n\ndef _all_attn_nodes(node_list: Sequence[Node]) -> bool:\n \"\"\"Returns True iff all nodes are attention layers (or nodes is empty).\"\"\"\n for node in node_list:\n if not _node_is_attn(node):\n return False\n return True\n\n\ndef _all_mlp_nodes(node_list: Sequence[Node]) -> bool:\n \"\"\"Returns True iff all nodes are MLP layers (or nodes is empty).\"\"\"", "metadata": {"task_id": "deepmind--tracr/49", "ground_truth": " for node in node_list:\n if not _node_is_mlp(node):\n return False\n return True\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", 
"craft_graph_to_model.py"], "context_start_lineno": 0, "lineno": 100, "function_name": "_all_mlp_nodes"}, "groundtruth": " for node in node_list:\n if not _node_is_mlp(node):\n return False\n return True\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Add craft model blocks to graph of RASPExpr.\"\"\"\n\nfrom typing import Any, Callable, Optional\n\nimport networkx as nx\nfrom tracr.compiler import nodes\nfrom tracr.craft import bases\nfrom tracr.craft.chamber import categorical_attn\nfrom tracr.craft.chamber import categorical_mlp\nfrom tracr.craft.chamber import numerical_mlp\nfrom tracr.craft.chamber import selector_width\nfrom tracr.rasp import rasp\n\n\ndef _transform_fun_to_basis_fun(\n fun: Callable[..., Any],\n output_direction_name: Optional[str] = None) -> Callable[..., Any]:\n \"\"\"Transforms a function acting on values into one acting on directions.\"\"\"", "metadata": {"task_id": "deepmind--tracr/50", "ground_truth": " def bases_fun(*args):\n values = [d.value for d in args]\n result = fun(*values)\n if output_direction_name:\n return bases.BasisDirection(output_direction_name, result)\n return result\n\n return bases_fun\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "expr_to_craft_graph.py"], "context_start_lineno": 0, "lineno": 33, "function_name": "_transform_fun_to_basis_fun"}, "groundtruth": " def bases_fun(*args):\n values = [d.value for d in args]\n result = fun(*values)\n if output_direction_name:\n return bases.BasisDirection(output_direction_name, result)\n return result\n\n return bases_fun\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Add craft model blocks to graph of RASPExpr.\"\"\"\n\nfrom typing import Any, Callable, Optional\n\nimport networkx as nx\nfrom tracr.compiler import nodes\nfrom tracr.craft import bases\nfrom tracr.craft.chamber import categorical_attn\nfrom tracr.craft.chamber import categorical_mlp\nfrom tracr.craft.chamber import numerical_mlp\nfrom tracr.craft.chamber import selector_width\nfrom tracr.rasp import rasp\n\n\ndef _transform_fun_to_basis_fun(\n fun: Callable[..., Any],\n output_direction_name: Optional[str] = None) -> Callable[..., Any]:\n \"\"\"Transforms a function acting on values into one acting on directions.\"\"\"\n\n def bases_fun(*args):", "metadata": {"task_id": "deepmind--tracr/51", "ground_truth": " values = [d.value for d in args]\n result = fun(*values)\n if output_direction_name:\n return bases.BasisDirection(output_direction_name, result)\n return result\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "expr_to_craft_graph.py"], "context_start_lineno": 0, "lineno": 34, "function_name": "bases_fun"}, "groundtruth": " values = [d.value for d in args]\n result = fun(*values)\n if output_direction_name:\n return bases.BasisDirection(output_direction_name, result)\n return result\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for compiler.craft_graph_to_model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport networkx as nx\nfrom tracr.compiler import craft_graph_to_model\nfrom tracr.compiler import nodes\nfrom tracr.compiler import rasp_to_graph\nfrom tracr.craft import bases\nfrom tracr.craft.chamber import categorical_attn\nfrom tracr.craft.chamber import categorical_mlp\nfrom tracr.rasp import rasp\n\n\nclass CraftAllocateModulesToLayersTest(parameterized.TestCase):\n\n def _get_dummy_block(self, block_type):", "metadata": {"task_id": "deepmind--tracr/52", "ground_truth": " if block_type == \"ATTN\":\n return categorical_attn.categorical_attn(\n query_space=bases.VectorSpaceWithBasis.from_names([\"query\"]),\n key_space=bases.VectorSpaceWithBasis.from_names([\"bos\", \"key\"]),\n value_space=bases.VectorSpaceWithBasis.from_names([\"bos\", \"value\"]),\n output_space=bases.VectorSpaceWithBasis.from_names([\"output\"]),\n bos_space=bases.VectorSpaceWithBasis.from_names([\"bos\"]),\n one_space=bases.VectorSpaceWithBasis.from_names([\"one\"]),\n attn_fn=lambda x, y: True,\n )\n elif block_type == \"MLP\":\n return categorical_mlp.map_categorical_mlp(\n input_space=bases.VectorSpaceWithBasis.from_names([\"input\"]),\n output_space=bases.VectorSpaceWithBasis.from_names([\"output\"]),\n operation=lambda x: x,\n )\n else:\n return None\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "craft_graph_to_model_test.py"], "context_start_lineno": 0, "lineno": 31, "function_name": "_get_dummy_block"}, "groundtruth": " if block_type == \"ATTN\":\n return categorical_attn.categorical_attn(\n query_space=bases.VectorSpaceWithBasis.from_names([\"query\"]),\n key_space=bases.VectorSpaceWithBasis.from_names([\"bos\", \"key\"]),\n value_space=bases.VectorSpaceWithBasis.from_names([\"bos\", \"value\"]),\n output_space=bases.VectorSpaceWithBasis.from_names([\"output\"]),\n bos_space=bases.VectorSpaceWithBasis.from_names([\"bos\"]),\n one_space=bases.VectorSpaceWithBasis.from_names([\"one\"]),\n attn_fn=lambda x, y: True,\n )\n elif block_type == \"MLP\":\n return categorical_mlp.map_categorical_mlp(\n input_space=bases.VectorSpaceWithBasis.from_names([\"input\"]),\n output_space=bases.VectorSpaceWithBasis.from_names([\"output\"]),\n operation=lambda x: x,\n )\n else:\n return None\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Converting a RaspExpr to a graph.\"\"\"\n\nimport dataclasses\nimport queue\nfrom typing import List\n\nimport networkx as nx\nfrom tracr.compiler import nodes\nfrom tracr.rasp import rasp\n\nNode = nodes.Node\nNodeID = nodes.NodeID\n\n\n@dataclasses.dataclass\nclass ExtractRaspGraphOutput:\n graph: nx.DiGraph\n sink: Node # the program's output.\n sources: List[Node] # the primitive S-Ops.\n\n\ndef extract_rasp_graph(tip: rasp.SOp) -> ExtractRaspGraphOutput:\n \"\"\"Converts a RASP program into a graph representation.\"\"\"\n expr_queue = queue.Queue()\n graph = nx.DiGraph()\n sources: List[NodeID] = []\n\n def ensure_node(expr: rasp.RASPExpr) -> NodeID:\n \"\"\"Finds or creates a graph node corresponding to expr; returns its ID.\"\"\"", "metadata": {"task_id": "deepmind--tracr/53", "ground_truth": " node_id = expr.label\n if node_id not in graph:\n graph.add_node(node_id, **{nodes.ID: node_id, nodes.EXPR: expr})\n\n return node_id\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "rasp_to_graph.py"], "context_start_lineno": 0, "lineno": 43, "function_name": "ensure_node"}, "groundtruth": " node_id = expr.label\n if node_id not in graph:\n graph.add_node(node_id, **{nodes.ID: node_id, nodes.EXPR: expr})\n\n return node_id\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Converting a RaspExpr to a graph.\"\"\"\n\nimport dataclasses\nimport queue\nfrom typing import List\n\nimport networkx as nx\nfrom tracr.compiler import nodes\nfrom tracr.rasp import rasp\n\nNode = nodes.Node\nNodeID = nodes.NodeID\n\n\n@dataclasses.dataclass\nclass ExtractRaspGraphOutput:\n graph: nx.DiGraph\n sink: Node # the program's output.\n sources: List[Node] # the primitive S-Ops.\n\n\ndef extract_rasp_graph(tip: rasp.SOp) -> ExtractRaspGraphOutput:\n \"\"\"Converts a RASP program into a graph representation.\"\"\"\n expr_queue = queue.Queue()\n graph = nx.DiGraph()\n sources: List[NodeID] = []\n\n def ensure_node(expr: rasp.RASPExpr) -> NodeID:\n \"\"\"Finds or creates a graph node corresponding to expr; returns its ID.\"\"\"\n node_id = expr.label\n if node_id not in graph:\n graph.add_node(node_id, **{nodes.ID: node_id, nodes.EXPR: expr})\n\n return node_id\n\n # Breadth-first search over the RASP expression graph.\n\n def visit_raspexpr(expr: rasp.RASPExpr):", "metadata": {"task_id": "deepmind--tracr/54", "ground_truth": " parent_id = ensure_node(expr)\n\n for child_expr in expr.children:\n expr_queue.put(child_expr)\n child_id = ensure_node(child_expr)\n graph.add_edge(child_id, parent_id)\n\n if not expr.children:\n sources.append(graph.nodes[parent_id])\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "rasp_to_graph.py"], "context_start_lineno": 0, "lineno": 52, "function_name": "visit_raspexpr"}, "groundtruth": " parent_id = ensure_node(expr)\n\n for child_expr in expr.children:\n expr_queue.put(child_expr)\n child_id = ensure_node(child_expr)\n graph.add_edge(child_id, parent_id)\n\n if not expr.children:\n sources.append(graph.nodes[parent_id])\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Integration tests for the RASP -> craft stages of the compiler.\"\"\"\n\nimport unittest\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport numpy as np\nfrom tracr.compiler import basis_inference\nfrom tracr.compiler import craft_graph_to_model\nfrom tracr.compiler import expr_to_craft_graph\nfrom tracr.compiler import nodes\nfrom tracr.compiler import rasp_to_graph\nfrom tracr.compiler import test_cases\nfrom tracr.craft import bases\nfrom tracr.craft import tests_common\nfrom tracr.rasp import rasp\n\n_BOS_DIRECTION = \"rasp_to_transformer_integration_test_BOS\"\n_ONE_DIRECTION = \"rasp_to_craft_integration_test_ONE\"\n\n\ndef _make_input_space(vocab, max_seq_len):", "metadata": {"task_id": "deepmind--tracr/55", "ground_truth": " tokens_space = bases.VectorSpaceWithBasis.from_values(\"tokens\", vocab)\n indices_space = bases.VectorSpaceWithBasis.from_values(\n \"indices\", range(max_seq_len))\n one_space = bases.VectorSpaceWithBasis.from_names([_ONE_DIRECTION])\n bos_space = bases.VectorSpaceWithBasis.from_names([_BOS_DIRECTION])\n input_space = bases.join_vector_spaces(tokens_space, indices_space, one_space,\n bos_space)\n\n return input_space\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "rasp_to_craft_integration_test.py"], "context_start_lineno": 0, "lineno": 36, "function_name": "_make_input_space"}, "groundtruth": " tokens_space = bases.VectorSpaceWithBasis.from_values(\"tokens\", vocab)\n indices_space = bases.VectorSpaceWithBasis.from_values(\n \"indices\", range(max_seq_len))\n one_space = bases.VectorSpaceWithBasis.from_names([_ONE_DIRECTION])\n bos_space = bases.VectorSpaceWithBasis.from_names([_BOS_DIRECTION])\n input_space = bases.join_vector_spaces(tokens_space, indices_space, one_space,\n bos_space)\n\n return input_space\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Integration tests for the RASP -> craft stages of the compiler.\"\"\"\n\nimport unittest\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport numpy as np\nfrom tracr.compiler import basis_inference\nfrom tracr.compiler import craft_graph_to_model\nfrom tracr.compiler import expr_to_craft_graph\nfrom tracr.compiler import nodes\nfrom tracr.compiler import rasp_to_graph\nfrom tracr.compiler import test_cases\nfrom tracr.craft import bases\nfrom tracr.craft import tests_common\nfrom tracr.rasp import rasp\n\n_BOS_DIRECTION = \"rasp_to_transformer_integration_test_BOS\"\n_ONE_DIRECTION = \"rasp_to_craft_integration_test_ONE\"\n\n\ndef _make_input_space(vocab, max_seq_len):\n tokens_space = bases.VectorSpaceWithBasis.from_values(\"tokens\", vocab)\n indices_space = bases.VectorSpaceWithBasis.from_values(\n \"indices\", range(max_seq_len))\n one_space = bases.VectorSpaceWithBasis.from_names([_ONE_DIRECTION])\n bos_space = bases.VectorSpaceWithBasis.from_names([_BOS_DIRECTION])\n input_space = bases.join_vector_spaces(tokens_space, indices_space, one_space,\n bos_space)\n\n return input_space\n\n\ndef _embed_input(input_seq, input_space):", "metadata": {"task_id": "deepmind--tracr/56", "ground_truth": " bos_vec = input_space.vector_from_basis_direction(\n bases.BasisDirection(_BOS_DIRECTION))\n one_vec = input_space.vector_from_basis_direction(\n bases.BasisDirection(_ONE_DIRECTION))\n embedded_input = [bos_vec + one_vec]\n for i, val in enumerate(input_seq):\n i_vec = input_space.vector_from_basis_direction(\n bases.BasisDirection(\"indices\", i))\n val_vec = input_space.vector_from_basis_direction(\n bases.BasisDirection(\"tokens\", val))\n embedded_input.append(i_vec + val_vec + one_vec)\n return bases.VectorInBasis.stack(embedded_input)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "rasp_to_craft_integration_test.py"], "context_start_lineno": 0, "lineno": 48, "function_name": "_embed_input"}, "groundtruth": " bos_vec = input_space.vector_from_basis_direction(\n bases.BasisDirection(_BOS_DIRECTION))\n one_vec = input_space.vector_from_basis_direction(\n bases.BasisDirection(_ONE_DIRECTION))\n embedded_input = [bos_vec + one_vec]\n for i, val in enumerate(input_seq):\n i_vec = input_space.vector_from_basis_direction(\n bases.BasisDirection(\"indices\", i))\n val_vec = input_space.vector_from_basis_direction(\n bases.BasisDirection(\"tokens\", val))\n embedded_input.append(i_vec + val_vec + one_vec)\n return bases.VectorInBasis.stack(embedded_input)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Integration tests for the RASP -> craft stages of the compiler.\"\"\"\n\nimport unittest\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport numpy as np\nfrom tracr.compiler import basis_inference\nfrom tracr.compiler import craft_graph_to_model\nfrom tracr.compiler import expr_to_craft_graph\nfrom tracr.compiler import nodes\nfrom tracr.compiler import rasp_to_graph\nfrom tracr.compiler import test_cases\nfrom tracr.craft import bases\nfrom tracr.craft import tests_common\nfrom tracr.rasp import rasp\n\n_BOS_DIRECTION = \"rasp_to_transformer_integration_test_BOS\"\n_ONE_DIRECTION = \"rasp_to_craft_integration_test_ONE\"\n\n\ndef _make_input_space(vocab, max_seq_len):\n tokens_space = bases.VectorSpaceWithBasis.from_values(\"tokens\", vocab)\n indices_space = bases.VectorSpaceWithBasis.from_values(\n \"indices\", range(max_seq_len))\n one_space = bases.VectorSpaceWithBasis.from_names([_ONE_DIRECTION])\n bos_space = bases.VectorSpaceWithBasis.from_names([_BOS_DIRECTION])\n input_space = bases.join_vector_spaces(tokens_space, indices_space, one_space,\n bos_space)\n\n return input_space\n\n\ndef _embed_input(input_seq, input_space):\n bos_vec = input_space.vector_from_basis_direction(\n bases.BasisDirection(_BOS_DIRECTION))\n one_vec = input_space.vector_from_basis_direction(\n bases.BasisDirection(_ONE_DIRECTION))\n embedded_input = [bos_vec + one_vec]\n for i, val in enumerate(input_seq):\n i_vec = input_space.vector_from_basis_direction(\n bases.BasisDirection(\"indices\", i))\n val_vec = input_space.vector_from_basis_direction(\n bases.BasisDirection(\"tokens\", val))\n embedded_input.append(i_vec + val_vec + one_vec)\n return bases.VectorInBasis.stack(embedded_input)\n\n\ndef _embed_output(output_seq, output_space, categorical_output):", "metadata": {"task_id": "deepmind--tracr/57", "ground_truth": " embedded_output = []\n output_label = output_space.basis[0].name\n for x in output_seq:\n if x is None:\n out_vec = output_space.null_vector()\n elif categorical_output:\n out_vec = output_space.vector_from_basis_direction(\n bases.BasisDirection(output_label, x))\n else:\n out_vec = x * output_space.vector_from_basis_direction(\n output_space.basis[0])\n embedded_output.append(out_vec)\n return bases.VectorInBasis.stack(embedded_output)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "rasp_to_craft_integration_test.py"], "context_start_lineno": 0, "lineno": 63, "function_name": "_embed_output"}, "groundtruth": " embedded_output = []\n output_label = output_space.basis[0].name\n for x in output_seq:\n if x is None:\n out_vec = output_space.null_vector()\n elif categorical_output:\n out_vec = output_space.vector_from_basis_direction(\n bases.BasisDirection(output_label, x))\n else:\n out_vec = x * output_space.vector_from_basis_direction(\n output_space.basis[0])\n 
embedded_output.append(out_vec)\n return bases.VectorInBasis.stack(embedded_output)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP programs only using the subset of RASP supported by the compiler.\"\"\"\n\nfrom typing import List, Sequence\n\nfrom tracr.rasp import rasp\n\n### Programs that work only under non-causal evaluation.\n\n\ndef make_length() -> rasp.SOp:\n \"\"\"Creates the `length` SOp using selector width primitive.\n\n Example usage:\n length = make_length()\n length(\"abcdefg\")\n >> [7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0]\n\n Returns:\n length: SOp mapping an input to a sequence, where every element\n is the length of that sequence.\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/58", "ground_truth": " all_true_selector = rasp.Select(\n rasp.tokens, rasp.tokens, rasp.Comparison.TRUE).named(\"all_true_selector\")\n return rasp.SelectorWidth(all_true_selector).named(\"length\")\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "lib.py"], "context_start_lineno": 0, "lineno": 35, "function_name": "make_length"}, "groundtruth": " all_true_selector = rasp.Select(\n rasp.tokens, rasp.tokens, rasp.Comparison.TRUE).named(\"all_true_selector\")\n return rasp.SelectorWidth(all_true_selector).named(\"length\")\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP programs only using the subset of RASP supported by the compiler.\"\"\"\n\nfrom typing import List, Sequence\n\nfrom tracr.rasp import rasp\n\n### Programs that work only under non-causal evaluation.\n\n\ndef make_length() -> rasp.SOp:\n \"\"\"Creates the `length` SOp using selector width primitive.\n\n Example usage:\n length = make_length()\n length(\"abcdefg\")\n >> [7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0]\n\n Returns:\n length: SOp mapping an input to a sequence, where every element\n is the length of that sequence.\n \"\"\"\n all_true_selector = rasp.Select(\n rasp.tokens, rasp.tokens, rasp.Comparison.TRUE).named(\"all_true_selector\")\n return rasp.SelectorWidth(all_true_selector).named(\"length\")\n\n\nlength = make_length()\n\n\ndef make_reverse(sop: rasp.SOp) -> rasp.SOp:\n \"\"\"Create an SOp that reverses a sequence, using length primitive.\n\n Example usage:\n reverse = make_reverse(rasp.tokens)\n reverse(\"Hello\")\n >> ['o', 'l', 'l', 'e', 'H']\n\n Args:\n sop: an SOp\n\n Returns:\n reverse : SOp that reverses the input sequence.\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/59", "ground_truth": " opp_idx = (length - rasp.indices).named(\"opp_idx\")\n opp_idx = (opp_idx - 1).named(\"opp_idx-1\")\n reverse_selector = rasp.Select(rasp.indices, opp_idx,\n rasp.Comparison.EQ).named(\"reverse_selector\")\n return rasp.Aggregate(reverse_selector, sop).named(\"reverse\")\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "lib.py"], "context_start_lineno": 0, "lineno": 57, "function_name": "make_reverse"}, "groundtruth": " opp_idx = (length - rasp.indices).named(\"opp_idx\")\n opp_idx = (opp_idx - 1).named(\"opp_idx-1\")\n reverse_selector = rasp.Select(rasp.indices, opp_idx,\n rasp.Comparison.EQ).named(\"reverse_selector\")\n return rasp.Aggregate(reverse_selector, sop).named(\"reverse\")\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP programs only using the subset of RASP supported by the compiler.\"\"\"\n\nfrom typing import List, Sequence\n\nfrom tracr.rasp import rasp\n\n### Programs that work only under non-causal evaluation.\n\n\ndef make_length() -> rasp.SOp:\n \"\"\"Creates the `length` SOp using selector width primitive.\n\n Example usage:\n length = make_length()\n length(\"abcdefg\")\n >> [7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0]\n\n Returns:\n length: SOp mapping an input to a sequence, where every element\n is the length of that sequence.\n \"\"\"\n all_true_selector = rasp.Select(\n rasp.tokens, rasp.tokens, rasp.Comparison.TRUE).named(\"all_true_selector\")\n return rasp.SelectorWidth(all_true_selector).named(\"length\")\n\n\nlength = make_length()\n\n\ndef make_reverse(sop: rasp.SOp) -> rasp.SOp:\n \"\"\"Create an SOp that reverses a sequence, using length primitive.\n\n Example usage:\n reverse = make_reverse(rasp.tokens)\n reverse(\"Hello\")\n >> ['o', 'l', 'l', 'e', 'H']\n\n Args:\n sop: an SOp\n\n Returns:\n reverse : SOp that reverses the input sequence.\n \"\"\"\n opp_idx = (length - rasp.indices).named(\"opp_idx\")\n opp_idx = (opp_idx - 1).named(\"opp_idx-1\")\n reverse_selector = rasp.Select(rasp.indices, opp_idx,\n rasp.Comparison.EQ).named(\"reverse_selector\")\n return rasp.Aggregate(reverse_selector, sop).named(\"reverse\")\n\n\ndef make_pair_balance(sop: rasp.SOp, open_token: str,\n close_token: str) -> rasp.SOp:\n \"\"\"Return fraction of previous open tokens minus the fraction of close tokens.\n\n (As implemented in the RASP paper.)\n\n If the outputs are always non-negative and end in 0, that implies the input\n has balanced parentheses.\n\n Example usage:\n num_l = make_pair_balance(rasp.tokens, \"(\", \")\")\n num_l(\"a()b(c))\")\n >> [0, 1/2, 0, 0, 1/5, 1/6, 0, -1/8]\n\n Args:\n sop: Input SOp.\n open_token: Token that counts positive.\n close_token: Token that counts negative.\n\n Returns:\n pair_balance: SOp mapping an input to a sequence, where every element\n is the fraction of previous open tokens minus previous close tokens.\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/60", "ground_truth": " bools_open = rasp.numerical(sop == open_token).named(\"bools_open\")\n opens = rasp.numerical(make_frac_prevs(bools_open)).named(\"opens\")\n\n bools_close = rasp.numerical(sop == close_token).named(\"bools_close\")\n closes = rasp.numerical(make_frac_prevs(bools_close)).named(\"closes\")\n\n pair_balance = rasp.numerical(rasp.LinearSequenceMap(opens, closes, 1, -1))\n return pair_balance.named(\"pair_balance\")\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "lib.py"], "context_start_lineno": 0, "lineno": 87, "function_name": "make_pair_balance"}, "groundtruth": " bools_open = rasp.numerical(sop == open_token).named(\"bools_open\")\n opens = rasp.numerical(make_frac_prevs(bools_open)).named(\"opens\")\n\n bools_close = 
rasp.numerical(sop == close_token).named(\"bools_close\")\n closes = rasp.numerical(make_frac_prevs(bools_close)).named(\"closes\")\n\n pair_balance = rasp.numerical(rasp.LinearSequenceMap(opens, closes, 1, -1))\n return pair_balance.named(\"pair_balance\")\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP programs only using the subset of RASP supported by the compiler.\"\"\"\n\nfrom typing import List, Sequence\n\nfrom tracr.rasp import rasp\n\n### Programs that work only under non-causal evaluation.\n\n\ndef make_length() -> rasp.SOp:\n \"\"\"Creates the `length` SOp using selector width primitive.\n\n Example usage:\n length = make_length()\n length(\"abcdefg\")\n >> [7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0]\n\n Returns:\n length: SOp mapping an input to a sequence, where every element\n is the length of that sequence.\n \"\"\"\n all_true_selector = rasp.Select(\n rasp.tokens, rasp.tokens, rasp.Comparison.TRUE).named(\"all_true_selector\")\n return rasp.SelectorWidth(all_true_selector).named(\"length\")\n\n\nlength = make_length()\n\n\ndef make_reverse(sop: rasp.SOp) -> rasp.SOp:\n \"\"\"Create an SOp that reverses a sequence, using length primitive.\n\n Example usage:\n reverse = make_reverse(rasp.tokens)\n reverse(\"Hello\")\n >> ['o', 'l', 'l', 'e', 'H']\n\n Args:\n sop: an SOp\n\n Returns:\n reverse : SOp that reverses the input sequence.\n \"\"\"\n opp_idx = (length - rasp.indices).named(\"opp_idx\")\n opp_idx = (opp_idx - 1).named(\"opp_idx-1\")\n reverse_selector = rasp.Select(rasp.indices, opp_idx,\n rasp.Comparison.EQ).named(\"reverse_selector\")\n return rasp.Aggregate(reverse_selector, sop).named(\"reverse\")\n\n\ndef make_pair_balance(sop: rasp.SOp, open_token: str,\n close_token: str) -> rasp.SOp:\n \"\"\"Return fraction of previous open tokens minus the fraction of close tokens.\n\n (As implemented in the RASP paper.)\n\n If the outputs are always non-negative and end in 0, that implies the input\n has balanced parentheses.\n\n Example usage:\n num_l = make_pair_balance(rasp.tokens, \"(\", \")\")\n num_l(\"a()b(c))\")\n >> [0, 1/2, 0, 0, 1/5, 1/6, 0, -1/8]\n\n Args:\n sop: Input SOp.\n open_token: Token that counts positive.\n close_token: Token that counts negative.\n\n Returns:\n pair_balance: SOp mapping an input to a sequence, where every element\n is the fraction of previous open tokens minus previous close tokens.\n \"\"\"\n bools_open = rasp.numerical(sop == open_token).named(\"bools_open\")\n opens = rasp.numerical(make_frac_prevs(bools_open)).named(\"opens\")\n\n bools_close = rasp.numerical(sop == close_token).named(\"bools_close\")\n closes = rasp.numerical(make_frac_prevs(bools_close)).named(\"closes\")\n\n pair_balance = rasp.numerical(rasp.LinearSequenceMap(opens, closes, 1, -1))\n return pair_balance.named(\"pair_balance\")\n\n\ndef make_shuffle_dyck(pairs: List[str]) -> rasp.SOp:\n 
\"\"\"Returns 1 if a set of parentheses are balanced, 0 else.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n shuffle_dyck2 = make_shuffle_dyck(pairs=[\"()\", \"{}\"])\n shuffle_dyck2(\"({)}\")\n >> [1, 1, 1, 1]\n shuffle_dyck2(\"(){)}\")\n >> [0, 0, 0, 0, 0]\n\n Args:\n pairs: List of pairs of open and close tokens that each should be balanced.\n \"\"\"\n assert len(pairs) >= 1\n\n # Compute running balance of each type of parenthesis\n balances = []\n for pair in pairs:\n assert len(pair) == 2\n open_token, close_token = pair\n balance = make_pair_balance(\n rasp.tokens, open_token=open_token,\n close_token=close_token).named(f\"balance_{pair}\")\n balances.append(balance)\n\n # Check if balances where negative anywhere -> parentheses not balanced\n any_negative = balances[0] < 0\n for balance in balances[1:]:\n any_negative = any_negative | (balance < 0)\n\n # Convert to numerical SOp\n any_negative = rasp.numerical(rasp.Map(lambda x: x,\n any_negative)).named(\"any_negative\")\n\n select_all = rasp.Select(rasp.indices, rasp.indices,\n rasp.Comparison.TRUE).named(\"select_all\")\n has_neg = rasp.numerical(rasp.Aggregate(select_all, any_negative,\n default=0)).named(\"has_neg\")\n\n # Check if all balances are 0 at the end -> closed all parentheses\n all_zero = balances[0] == 0\n for balance in balances[1:]:\n all_zero = all_zero & (balance == 0)\n\n select_last = rasp.Select(rasp.indices, length - 1,\n rasp.Comparison.EQ).named(\"select_last\")\n last_zero = rasp.Aggregate(select_last, all_zero).named(\"last_zero\")\n\n not_has_neg = (~has_neg).named(\"not_has_neg\")\n return (last_zero & not_has_neg).named(\"shuffle_dyck\")\n\n\ndef make_shuffle_dyck2() -> rasp.SOp:\n return make_shuffle_dyck(pairs=[\"()\", \"{}\"]).named(\"shuffle_dyck2\")\n\n\ndef make_hist() -> rasp.SOp:\n \"\"\"Returns the number of times each token occurs in the input.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n hist = make_hist()\n hist(\"abac\")\n >> [2, 1, 2, 1]\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/61", "ground_truth": " same_tok = rasp.Select(rasp.tokens, rasp.tokens,\n rasp.Comparison.EQ).named(\"same_tok\")\n return rasp.SelectorWidth(same_tok).named(\"hist\")\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "lib.py"], "context_start_lineno": 0, "lineno": 165, "function_name": "make_hist"}, "groundtruth": " same_tok = rasp.Select(rasp.tokens, rasp.tokens,\n rasp.Comparison.EQ).named(\"same_tok\")\n return rasp.SelectorWidth(same_tok).named(\"hist\")\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP programs only using the subset of RASP supported by the compiler.\"\"\"\n\nfrom typing import List, Sequence\n\nfrom tracr.rasp import rasp\n\n### Programs that work only under non-causal evaluation.\n\n\ndef make_length() -> rasp.SOp:\n \"\"\"Creates the `length` SOp using selector width primitive.\n\n Example usage:\n length = make_length()\n length(\"abcdefg\")\n >> [7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0]\n\n Returns:\n length: SOp mapping an input to a sequence, where every element\n is the length of that sequence.\n \"\"\"\n all_true_selector = rasp.Select(\n rasp.tokens, rasp.tokens, rasp.Comparison.TRUE).named(\"all_true_selector\")\n return rasp.SelectorWidth(all_true_selector).named(\"length\")\n\n\nlength = make_length()\n\n\ndef make_reverse(sop: rasp.SOp) -> rasp.SOp:\n \"\"\"Create an SOp that reverses a sequence, using length primitive.\n\n Example usage:\n reverse = make_reverse(rasp.tokens)\n reverse(\"Hello\")\n >> ['o', 'l', 'l', 'e', 'H']\n\n Args:\n sop: an SOp\n\n Returns:\n reverse : SOp that reverses the input sequence.\n \"\"\"\n opp_idx = (length - rasp.indices).named(\"opp_idx\")\n opp_idx = (opp_idx - 1).named(\"opp_idx-1\")\n reverse_selector = rasp.Select(rasp.indices, opp_idx,\n rasp.Comparison.EQ).named(\"reverse_selector\")\n return rasp.Aggregate(reverse_selector, sop).named(\"reverse\")\n\n\ndef make_pair_balance(sop: rasp.SOp, open_token: str,\n close_token: str) -> rasp.SOp:\n \"\"\"Return fraction of previous open tokens minus the fraction of close tokens.\n\n (As implemented in the RASP paper.)\n\n If the outputs are always non-negative and end in 0, that implies the input\n has balanced parentheses.\n\n Example usage:\n num_l = make_pair_balance(rasp.tokens, \"(\", \")\")\n num_l(\"a()b(c))\")\n >> [0, 1/2, 0, 0, 1/5, 1/6, 0, -1/8]\n\n Args:\n sop: Input SOp.\n open_token: Token that counts positive.\n close_token: Token that counts negative.\n\n Returns:\n pair_balance: SOp mapping an input to a sequence, where every element\n is the fraction of previous open tokens minus previous close tokens.\n \"\"\"\n bools_open = rasp.numerical(sop == open_token).named(\"bools_open\")\n opens = rasp.numerical(make_frac_prevs(bools_open)).named(\"opens\")\n\n bools_close = rasp.numerical(sop == close_token).named(\"bools_close\")\n closes = rasp.numerical(make_frac_prevs(bools_close)).named(\"closes\")\n\n pair_balance = rasp.numerical(rasp.LinearSequenceMap(opens, closes, 1, -1))\n return pair_balance.named(\"pair_balance\")\n\n\ndef make_shuffle_dyck(pairs: List[str]) -> rasp.SOp:\n \"\"\"Returns 1 if a set of parentheses are balanced, 0 else.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n shuffle_dyck2 = make_shuffle_dyck(pairs=[\"()\", \"{}\"])\n shuffle_dyck2(\"({)}\")\n >> [1, 1, 1, 1]\n shuffle_dyck2(\"(){)}\")\n >> [0, 0, 0, 0, 0]\n\n Args:\n pairs: List of pairs of open and close 
tokens that each should be balanced.\n \"\"\"\n assert len(pairs) >= 1\n\n # Compute running balance of each type of parenthesis\n balances = []\n for pair in pairs:\n assert len(pair) == 2\n open_token, close_token = pair\n balance = make_pair_balance(\n rasp.tokens, open_token=open_token,\n close_token=close_token).named(f\"balance_{pair}\")\n balances.append(balance)\n\n # Check if balances where negative anywhere -> parentheses not balanced\n any_negative = balances[0] < 0\n for balance in balances[1:]:\n any_negative = any_negative | (balance < 0)\n\n # Convert to numerical SOp\n any_negative = rasp.numerical(rasp.Map(lambda x: x,\n any_negative)).named(\"any_negative\")\n\n select_all = rasp.Select(rasp.indices, rasp.indices,\n rasp.Comparison.TRUE).named(\"select_all\")\n has_neg = rasp.numerical(rasp.Aggregate(select_all, any_negative,\n default=0)).named(\"has_neg\")\n\n # Check if all balances are 0 at the end -> closed all parentheses\n all_zero = balances[0] == 0\n for balance in balances[1:]:\n all_zero = all_zero & (balance == 0)\n\n select_last = rasp.Select(rasp.indices, length - 1,\n rasp.Comparison.EQ).named(\"select_last\")\n last_zero = rasp.Aggregate(select_last, all_zero).named(\"last_zero\")\n\n not_has_neg = (~has_neg).named(\"not_has_neg\")\n return (last_zero & not_has_neg).named(\"shuffle_dyck\")\n\n\ndef make_shuffle_dyck2() -> rasp.SOp:\n return make_shuffle_dyck(pairs=[\"()\", \"{}\"]).named(\"shuffle_dyck2\")\n\n\ndef make_hist() -> rasp.SOp:\n \"\"\"Returns the number of times each token occurs in the input.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n hist = make_hist()\n hist(\"abac\")\n >> [2, 1, 2, 1]\n \"\"\"\n same_tok = rasp.Select(rasp.tokens, rasp.tokens,\n rasp.Comparison.EQ).named(\"same_tok\")\n return rasp.SelectorWidth(same_tok).named(\"hist\")\n\n\ndef make_sort_unique(vals: rasp.SOp, keys: rasp.SOp) -> rasp.SOp:\n \"\"\"Returns vals sorted by < relation on keys.\n\n Only supports unique keys.\n\n Example usage:\n sort = make_sort(rasp.tokens, rasp.tokens)\n sort([2, 4, 3, 1])\n >> [1, 2, 3, 4]\n\n Args:\n vals: Values to sort.\n keys: Keys for sorting.\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/62", "ground_truth": " smaller = rasp.Select(keys, keys, rasp.Comparison.LT).named(\"smaller\")\n target_pos = rasp.SelectorWidth(smaller).named(\"target_pos\")\n sel_new = rasp.Select(target_pos, rasp.indices, rasp.Comparison.EQ)\n return rasp.Aggregate(sel_new, vals).named(\"sort\")\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "lib.py"], "context_start_lineno": 0, "lineno": 184, "function_name": "make_sort_unique"}, "groundtruth": " smaller = rasp.Select(keys, keys, rasp.Comparison.LT).named(\"smaller\")\n target_pos = rasp.SelectorWidth(smaller).named(\"target_pos\")\n sel_new = rasp.Select(target_pos, rasp.indices, rasp.Comparison.EQ)\n return rasp.Aggregate(sel_new, vals).named(\"sort\")\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP programs only using the subset of RASP supported by the compiler.\"\"\"\n\nfrom typing import List, Sequence\n\nfrom tracr.rasp import rasp\n\n### Programs that work only under non-causal evaluation.\n\n\ndef make_length() -> rasp.SOp:\n \"\"\"Creates the `length` SOp using selector width primitive.\n\n Example usage:\n length = make_length()\n length(\"abcdefg\")\n >> [7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0]\n\n Returns:\n length: SOp mapping an input to a sequence, where every element\n is the length of that sequence.\n \"\"\"\n all_true_selector = rasp.Select(\n rasp.tokens, rasp.tokens, rasp.Comparison.TRUE).named(\"all_true_selector\")\n return rasp.SelectorWidth(all_true_selector).named(\"length\")\n\n\nlength = make_length()\n\n\ndef make_reverse(sop: rasp.SOp) -> rasp.SOp:\n \"\"\"Create an SOp that reverses a sequence, using length primitive.\n\n Example usage:\n reverse = make_reverse(rasp.tokens)\n reverse(\"Hello\")\n >> ['o', 'l', 'l', 'e', 'H']\n\n Args:\n sop: an SOp\n\n Returns:\n reverse : SOp that reverses the input sequence.\n \"\"\"\n opp_idx = (length - rasp.indices).named(\"opp_idx\")\n opp_idx = (opp_idx - 1).named(\"opp_idx-1\")\n reverse_selector = rasp.Select(rasp.indices, opp_idx,\n rasp.Comparison.EQ).named(\"reverse_selector\")\n return rasp.Aggregate(reverse_selector, sop).named(\"reverse\")\n\n\ndef make_pair_balance(sop: rasp.SOp, open_token: str,\n close_token: str) -> rasp.SOp:\n \"\"\"Return fraction of previous open tokens minus the fraction of close tokens.\n\n (As implemented in the RASP paper.)\n\n If the outputs are always non-negative and end in 0, that implies the input\n has balanced parentheses.\n\n Example usage:\n num_l = make_pair_balance(rasp.tokens, \"(\", \")\")\n num_l(\"a()b(c))\")\n >> [0, 1/2, 0, 0, 1/5, 1/6, 0, -1/8]\n\n Args:\n sop: Input SOp.\n open_token: Token that counts positive.\n close_token: Token that counts negative.\n\n Returns:\n pair_balance: SOp mapping an input to a sequence, where every element\n is the fraction of previous open tokens minus previous close tokens.\n \"\"\"\n bools_open = rasp.numerical(sop == open_token).named(\"bools_open\")\n opens = rasp.numerical(make_frac_prevs(bools_open)).named(\"opens\")\n\n bools_close = rasp.numerical(sop == close_token).named(\"bools_close\")\n closes = rasp.numerical(make_frac_prevs(bools_close)).named(\"closes\")\n\n pair_balance = rasp.numerical(rasp.LinearSequenceMap(opens, closes, 1, -1))\n return pair_balance.named(\"pair_balance\")\n\n\ndef make_shuffle_dyck(pairs: List[str]) -> rasp.SOp:\n \"\"\"Returns 1 if a set of parentheses are balanced, 0 else.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n shuffle_dyck2 = make_shuffle_dyck(pairs=[\"()\", \"{}\"])\n shuffle_dyck2(\"({)}\")\n >> [1, 1, 1, 1]\n shuffle_dyck2(\"(){)}\")\n >> [0, 0, 0, 0, 0]\n\n Args:\n pairs: List of pairs of open and close 
tokens that each should be balanced.\n \"\"\"\n assert len(pairs) >= 1\n\n # Compute running balance of each type of parenthesis\n balances = []\n for pair in pairs:\n assert len(pair) == 2\n open_token, close_token = pair\n balance = make_pair_balance(\n rasp.tokens, open_token=open_token,\n close_token=close_token).named(f\"balance_{pair}\")\n balances.append(balance)\n\n # Check if balances where negative anywhere -> parentheses not balanced\n any_negative = balances[0] < 0\n for balance in balances[1:]:\n any_negative = any_negative | (balance < 0)\n\n # Convert to numerical SOp\n any_negative = rasp.numerical(rasp.Map(lambda x: x,\n any_negative)).named(\"any_negative\")\n\n select_all = rasp.Select(rasp.indices, rasp.indices,\n rasp.Comparison.TRUE).named(\"select_all\")\n has_neg = rasp.numerical(rasp.Aggregate(select_all, any_negative,\n default=0)).named(\"has_neg\")\n\n # Check if all balances are 0 at the end -> closed all parentheses\n all_zero = balances[0] == 0\n for balance in balances[1:]:\n all_zero = all_zero & (balance == 0)\n\n select_last = rasp.Select(rasp.indices, length - 1,\n rasp.Comparison.EQ).named(\"select_last\")\n last_zero = rasp.Aggregate(select_last, all_zero).named(\"last_zero\")\n\n not_has_neg = (~has_neg).named(\"not_has_neg\")\n return (last_zero & not_has_neg).named(\"shuffle_dyck\")\n\n\ndef make_shuffle_dyck2() -> rasp.SOp:\n return make_shuffle_dyck(pairs=[\"()\", \"{}\"]).named(\"shuffle_dyck2\")\n\n\ndef make_hist() -> rasp.SOp:\n \"\"\"Returns the number of times each token occurs in the input.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n hist = make_hist()\n hist(\"abac\")\n >> [2, 1, 2, 1]\n \"\"\"\n same_tok = rasp.Select(rasp.tokens, rasp.tokens,\n rasp.Comparison.EQ).named(\"same_tok\")\n return rasp.SelectorWidth(same_tok).named(\"hist\")\n\n\ndef make_sort_unique(vals: rasp.SOp, keys: rasp.SOp) -> rasp.SOp:\n \"\"\"Returns vals sorted by < relation on keys.\n\n Only supports unique keys.\n\n Example usage:\n sort = make_sort(rasp.tokens, rasp.tokens)\n sort([2, 4, 3, 1])\n >> [1, 2, 3, 4]\n\n Args:\n vals: Values to sort.\n keys: Keys for sorting.\n \"\"\"\n smaller = rasp.Select(keys, keys, rasp.Comparison.LT).named(\"smaller\")\n target_pos = rasp.SelectorWidth(smaller).named(\"target_pos\")\n sel_new = rasp.Select(target_pos, rasp.indices, rasp.Comparison.EQ)\n return rasp.Aggregate(sel_new, vals).named(\"sort\")\n\n\ndef make_sort(vals: rasp.SOp, keys: rasp.SOp, *, max_seq_len: int,\n min_key: float) -> rasp.SOp:\n \"\"\"Returns vals sorted by < relation on keys, which don't need to be unique.\n\n The implementation differs from the RASP paper, as it avoids using\n compositions of selectors to break ties. 
Instead, it uses the arguments\n max_seq_len and min_key to ensure the keys are unique.\n\n Note that this approach only works for numerical keys.\n\n Example usage:\n sort = make_sort(rasp.tokens, rasp.tokens, 5, 1)\n sort([2, 4, 3, 1])\n >> [1, 2, 3, 4]\n sort([2, 4, 1, 2])\n >> [1, 2, 2, 4]\n\n Args:\n vals: Values to sort.\n keys: Keys for sorting.\n max_seq_len: Maximum sequence length (used to ensure keys are unique)\n min_key: Minimum key value (used to ensure keys are unique)\n\n Returns:\n Output SOp of sort program.\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/63", "ground_truth": " keys = rasp.SequenceMap(lambda x, i: x + min_key * i / max_seq_len, keys,\n rasp.indices)\n return make_sort_unique(vals, keys)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "lib.py"], "context_start_lineno": 0, "lineno": 216, "function_name": "make_sort"}, "groundtruth": " keys = rasp.SequenceMap(lambda x, i: x + min_key * i / max_seq_len, keys,\n rasp.indices)\n return make_sort_unique(vals, keys)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP programs only using the subset of RASP supported by the compiler.\"\"\"\n\nfrom typing import List, Sequence\n\nfrom tracr.rasp import rasp\n\n### Programs that work only under non-causal evaluation.\n\n\ndef make_length() -> rasp.SOp:\n \"\"\"Creates the `length` SOp using selector width primitive.\n\n Example usage:\n length = make_length()\n length(\"abcdefg\")\n >> [7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0]\n\n Returns:\n length: SOp mapping an input to a sequence, where every element\n is the length of that sequence.\n \"\"\"\n all_true_selector = rasp.Select(\n rasp.tokens, rasp.tokens, rasp.Comparison.TRUE).named(\"all_true_selector\")\n return rasp.SelectorWidth(all_true_selector).named(\"length\")\n\n\nlength = make_length()\n\n\ndef make_reverse(sop: rasp.SOp) -> rasp.SOp:\n \"\"\"Create an SOp that reverses a sequence, using length primitive.\n\n Example usage:\n reverse = make_reverse(rasp.tokens)\n reverse(\"Hello\")\n >> ['o', 'l', 'l', 'e', 'H']\n\n Args:\n sop: an SOp\n\n Returns:\n reverse : SOp that reverses the input sequence.\n \"\"\"\n opp_idx = (length - rasp.indices).named(\"opp_idx\")\n opp_idx = (opp_idx - 1).named(\"opp_idx-1\")\n reverse_selector = rasp.Select(rasp.indices, opp_idx,\n rasp.Comparison.EQ).named(\"reverse_selector\")\n return rasp.Aggregate(reverse_selector, sop).named(\"reverse\")\n\n\ndef make_pair_balance(sop: rasp.SOp, open_token: str,\n close_token: str) -> rasp.SOp:\n \"\"\"Return fraction of previous open tokens minus the fraction of close tokens.\n\n (As implemented in the RASP paper.)\n\n If the outputs are always non-negative and end in 0, that implies the input\n has balanced parentheses.\n\n Example usage:\n num_l = make_pair_balance(rasp.tokens, \"(\", \")\")\n num_l(\"a()b(c))\")\n >> [0, 1/2, 0, 0, 1/5, 1/6, 0, 
-1/8]\n\n Args:\n sop: Input SOp.\n open_token: Token that counts positive.\n close_token: Token that counts negative.\n\n Returns:\n pair_balance: SOp mapping an input to a sequence, where every element\n is the fraction of previous open tokens minus previous close tokens.\n \"\"\"\n bools_open = rasp.numerical(sop == open_token).named(\"bools_open\")\n opens = rasp.numerical(make_frac_prevs(bools_open)).named(\"opens\")\n\n bools_close = rasp.numerical(sop == close_token).named(\"bools_close\")\n closes = rasp.numerical(make_frac_prevs(bools_close)).named(\"closes\")\n\n pair_balance = rasp.numerical(rasp.LinearSequenceMap(opens, closes, 1, -1))\n return pair_balance.named(\"pair_balance\")\n\n\ndef make_shuffle_dyck(pairs: List[str]) -> rasp.SOp:\n \"\"\"Returns 1 if a set of parentheses are balanced, 0 else.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n shuffle_dyck2 = make_shuffle_dyck(pairs=[\"()\", \"{}\"])\n shuffle_dyck2(\"({)}\")\n >> [1, 1, 1, 1]\n shuffle_dyck2(\"(){)}\")\n >> [0, 0, 0, 0, 0]\n\n Args:\n pairs: List of pairs of open and close tokens that each should be balanced.\n \"\"\"\n assert len(pairs) >= 1\n\n # Compute running balance of each type of parenthesis\n balances = []\n for pair in pairs:\n assert len(pair) == 2\n open_token, close_token = pair\n balance = make_pair_balance(\n rasp.tokens, open_token=open_token,\n close_token=close_token).named(f\"balance_{pair}\")\n balances.append(balance)\n\n # Check if balances where negative anywhere -> parentheses not balanced\n any_negative = balances[0] < 0\n for balance in balances[1:]:\n any_negative = any_negative | (balance < 0)\n\n # Convert to numerical SOp\n any_negative = rasp.numerical(rasp.Map(lambda x: x,\n any_negative)).named(\"any_negative\")\n\n select_all = rasp.Select(rasp.indices, rasp.indices,\n rasp.Comparison.TRUE).named(\"select_all\")\n has_neg = rasp.numerical(rasp.Aggregate(select_all, any_negative,\n default=0)).named(\"has_neg\")\n\n # Check if all balances are 0 at the end -> closed all parentheses\n all_zero = balances[0] == 0\n for balance in balances[1:]:\n all_zero = all_zero & (balance == 0)\n\n select_last = rasp.Select(rasp.indices, length - 1,\n rasp.Comparison.EQ).named(\"select_last\")\n last_zero = rasp.Aggregate(select_last, all_zero).named(\"last_zero\")\n\n not_has_neg = (~has_neg).named(\"not_has_neg\")\n return (last_zero & not_has_neg).named(\"shuffle_dyck\")\n\n\ndef make_shuffle_dyck2() -> rasp.SOp:\n return make_shuffle_dyck(pairs=[\"()\", \"{}\"]).named(\"shuffle_dyck2\")\n\n\ndef make_hist() -> rasp.SOp:\n \"\"\"Returns the number of times each token occurs in the input.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n hist = make_hist()\n hist(\"abac\")\n >> [2, 1, 2, 1]\n \"\"\"\n same_tok = rasp.Select(rasp.tokens, rasp.tokens,\n rasp.Comparison.EQ).named(\"same_tok\")\n return rasp.SelectorWidth(same_tok).named(\"hist\")\n\n\ndef make_sort_unique(vals: rasp.SOp, keys: rasp.SOp) -> rasp.SOp:\n \"\"\"Returns vals sorted by < relation on keys.\n\n Only supports unique keys.\n\n Example usage:\n sort = make_sort(rasp.tokens, rasp.tokens)\n sort([2, 4, 3, 1])\n >> [1, 2, 3, 4]\n\n Args:\n vals: Values to sort.\n keys: Keys for sorting.\n \"\"\"\n smaller = rasp.Select(keys, keys, rasp.Comparison.LT).named(\"smaller\")\n target_pos = rasp.SelectorWidth(smaller).named(\"target_pos\")\n sel_new = rasp.Select(target_pos, rasp.indices, rasp.Comparison.EQ)\n return rasp.Aggregate(sel_new, vals).named(\"sort\")\n\n\ndef make_sort(vals: rasp.SOp, keys: 
rasp.SOp, *, max_seq_len: int,\n min_key: float) -> rasp.SOp:\n \"\"\"Returns vals sorted by < relation on keys, which don't need to be unique.\n\n The implementation differs from the RASP paper, as it avoids using\n compositions of selectors to break ties. Instead, it uses the arguments\n max_seq_len and min_key to ensure the keys are unique.\n\n Note that this approach only works for numerical keys.\n\n Example usage:\n sort = make_sort(rasp.tokens, rasp.tokens, 5, 1)\n sort([2, 4, 3, 1])\n >> [1, 2, 3, 4]\n sort([2, 4, 1, 2])\n >> [1, 2, 2, 4]\n\n Args:\n vals: Values to sort.\n keys: Keys for sorting.\n max_seq_len: Maximum sequence length (used to ensure keys are unique)\n min_key: Minimum key value (used to ensure keys are unique)\n\n Returns:\n Output SOp of sort program.\n \"\"\"\n keys = rasp.SequenceMap(lambda x, i: x + min_key * i / max_seq_len, keys,\n rasp.indices)\n return make_sort_unique(vals, keys)\n\n\ndef make_sort_freq(max_seq_len: int) -> rasp.SOp:\n \"\"\"Returns tokens sorted by the frequency they appear in the input.\n\n Tokens the appear the same amount of times are output in the same order as in\n the input.\n\n Example usage:\n sort = make_sort_freq(rasp.tokens, rasp.tokens, 5)\n sort([2, 4, 2, 1])\n >> [2, 2, 4, 1]\n\n Args:\n max_seq_len: Maximum sequence length (used to ensure keys are unique)\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/64", "ground_truth": " hist = -1 * make_hist().named(\"hist\")\n return make_sort(\n rasp.tokens, hist, max_seq_len=max_seq_len, min_key=1).named(\"sort_freq\")\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "lib.py"], "context_start_lineno": 0, "lineno": 235, "function_name": "make_sort_freq"}, "groundtruth": " hist = -1 * make_hist().named(\"hist\")\n return make_sort(\n rasp.tokens, hist, max_seq_len=max_seq_len, min_key=1).named(\"sort_freq\")\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP programs only using the subset of RASP supported by the compiler.\"\"\"\n\nfrom typing import List, Sequence\n\nfrom tracr.rasp import rasp\n\n### Programs that work only under non-causal evaluation.\n\n\ndef make_length() -> rasp.SOp:\n \"\"\"Creates the `length` SOp using selector width primitive.\n\n Example usage:\n length = make_length()\n length(\"abcdefg\")\n >> [7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0]\n\n Returns:\n length: SOp mapping an input to a sequence, where every element\n is the length of that sequence.\n \"\"\"\n all_true_selector = rasp.Select(\n rasp.tokens, rasp.tokens, rasp.Comparison.TRUE).named(\"all_true_selector\")\n return rasp.SelectorWidth(all_true_selector).named(\"length\")\n\n\nlength = make_length()\n\n\ndef make_reverse(sop: rasp.SOp) -> rasp.SOp:\n \"\"\"Create an SOp that reverses a sequence, using length primitive.\n\n Example usage:\n reverse = make_reverse(rasp.tokens)\n reverse(\"Hello\")\n >> ['o', 'l', 'l', 'e', 'H']\n\n Args:\n sop: an SOp\n\n Returns:\n reverse : SOp that reverses the input sequence.\n \"\"\"\n opp_idx = (length - rasp.indices).named(\"opp_idx\")\n opp_idx = (opp_idx - 1).named(\"opp_idx-1\")\n reverse_selector = rasp.Select(rasp.indices, opp_idx,\n rasp.Comparison.EQ).named(\"reverse_selector\")\n return rasp.Aggregate(reverse_selector, sop).named(\"reverse\")\n\n\ndef make_pair_balance(sop: rasp.SOp, open_token: str,\n close_token: str) -> rasp.SOp:\n \"\"\"Return fraction of previous open tokens minus the fraction of close tokens.\n\n (As implemented in the RASP paper.)\n\n If the outputs are always non-negative and end in 0, that implies the input\n has balanced parentheses.\n\n Example usage:\n num_l = make_pair_balance(rasp.tokens, \"(\", \")\")\n num_l(\"a()b(c))\")\n >> [0, 1/2, 0, 0, 1/5, 1/6, 0, -1/8]\n\n Args:\n sop: Input SOp.\n open_token: Token that counts positive.\n close_token: Token that counts negative.\n\n Returns:\n pair_balance: SOp mapping an input to a sequence, where every element\n is the fraction of previous open tokens minus previous close tokens.\n \"\"\"\n bools_open = rasp.numerical(sop == open_token).named(\"bools_open\")\n opens = rasp.numerical(make_frac_prevs(bools_open)).named(\"opens\")\n\n bools_close = rasp.numerical(sop == close_token).named(\"bools_close\")\n closes = rasp.numerical(make_frac_prevs(bools_close)).named(\"closes\")\n\n pair_balance = rasp.numerical(rasp.LinearSequenceMap(opens, closes, 1, -1))\n return pair_balance.named(\"pair_balance\")\n\n\ndef make_shuffle_dyck(pairs: List[str]) -> rasp.SOp:\n \"\"\"Returns 1 if a set of parentheses are balanced, 0 else.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n shuffle_dyck2 = make_shuffle_dyck(pairs=[\"()\", \"{}\"])\n shuffle_dyck2(\"({)}\")\n >> [1, 1, 1, 1]\n shuffle_dyck2(\"(){)}\")\n >> [0, 0, 0, 0, 0]\n\n Args:\n pairs: List of pairs of open and close 
tokens that each should be balanced.\n \"\"\"\n assert len(pairs) >= 1\n\n # Compute running balance of each type of parenthesis\n balances = []\n for pair in pairs:\n assert len(pair) == 2\n open_token, close_token = pair\n balance = make_pair_balance(\n rasp.tokens, open_token=open_token,\n close_token=close_token).named(f\"balance_{pair}\")\n balances.append(balance)\n\n # Check if balances where negative anywhere -> parentheses not balanced\n any_negative = balances[0] < 0\n for balance in balances[1:]:\n any_negative = any_negative | (balance < 0)\n\n # Convert to numerical SOp\n any_negative = rasp.numerical(rasp.Map(lambda x: x,\n any_negative)).named(\"any_negative\")\n\n select_all = rasp.Select(rasp.indices, rasp.indices,\n rasp.Comparison.TRUE).named(\"select_all\")\n has_neg = rasp.numerical(rasp.Aggregate(select_all, any_negative,\n default=0)).named(\"has_neg\")\n\n # Check if all balances are 0 at the end -> closed all parentheses\n all_zero = balances[0] == 0\n for balance in balances[1:]:\n all_zero = all_zero & (balance == 0)\n\n select_last = rasp.Select(rasp.indices, length - 1,\n rasp.Comparison.EQ).named(\"select_last\")\n last_zero = rasp.Aggregate(select_last, all_zero).named(\"last_zero\")\n\n not_has_neg = (~has_neg).named(\"not_has_neg\")\n return (last_zero & not_has_neg).named(\"shuffle_dyck\")\n\n\ndef make_shuffle_dyck2() -> rasp.SOp:\n return make_shuffle_dyck(pairs=[\"()\", \"{}\"]).named(\"shuffle_dyck2\")\n\n\ndef make_hist() -> rasp.SOp:\n \"\"\"Returns the number of times each token occurs in the input.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n hist = make_hist()\n hist(\"abac\")\n >> [2, 1, 2, 1]\n \"\"\"\n same_tok = rasp.Select(rasp.tokens, rasp.tokens,\n rasp.Comparison.EQ).named(\"same_tok\")\n return rasp.SelectorWidth(same_tok).named(\"hist\")\n\n\ndef make_sort_unique(vals: rasp.SOp, keys: rasp.SOp) -> rasp.SOp:\n \"\"\"Returns vals sorted by < relation on keys.\n\n Only supports unique keys.\n\n Example usage:\n sort = make_sort(rasp.tokens, rasp.tokens)\n sort([2, 4, 3, 1])\n >> [1, 2, 3, 4]\n\n Args:\n vals: Values to sort.\n keys: Keys for sorting.\n \"\"\"\n smaller = rasp.Select(keys, keys, rasp.Comparison.LT).named(\"smaller\")\n target_pos = rasp.SelectorWidth(smaller).named(\"target_pos\")\n sel_new = rasp.Select(target_pos, rasp.indices, rasp.Comparison.EQ)\n return rasp.Aggregate(sel_new, vals).named(\"sort\")\n\n\ndef make_sort(vals: rasp.SOp, keys: rasp.SOp, *, max_seq_len: int,\n min_key: float) -> rasp.SOp:\n \"\"\"Returns vals sorted by < relation on keys, which don't need to be unique.\n\n The implementation differs from the RASP paper, as it avoids using\n compositions of selectors to break ties. 
Instead, it uses the arguments\n max_seq_len and min_key to ensure the keys are unique.\n\n Note that this approach only works for numerical keys.\n\n Example usage:\n sort = make_sort(rasp.tokens, rasp.tokens, 5, 1)\n sort([2, 4, 3, 1])\n >> [1, 2, 3, 4]\n sort([2, 4, 1, 2])\n >> [1, 2, 2, 4]\n\n Args:\n vals: Values to sort.\n keys: Keys for sorting.\n max_seq_len: Maximum sequence length (used to ensure keys are unique)\n min_key: Minimum key value (used to ensure keys are unique)\n\n Returns:\n Output SOp of sort program.\n \"\"\"\n keys = rasp.SequenceMap(lambda x, i: x + min_key * i / max_seq_len, keys,\n rasp.indices)\n return make_sort_unique(vals, keys)\n\n\ndef make_sort_freq(max_seq_len: int) -> rasp.SOp:\n \"\"\"Returns tokens sorted by the frequency they appear in the input.\n\n Tokens the appear the same amount of times are output in the same order as in\n the input.\n\n Example usage:\n sort = make_sort_freq(rasp.tokens, rasp.tokens, 5)\n sort([2, 4, 2, 1])\n >> [2, 2, 4, 1]\n\n Args:\n max_seq_len: Maximum sequence length (used to ensure keys are unique)\n \"\"\"\n hist = -1 * make_hist().named(\"hist\")\n return make_sort(\n rasp.tokens, hist, max_seq_len=max_seq_len, min_key=1).named(\"sort_freq\")\n\n\n### Programs that work under both causal and regular evaluation.\n\n\ndef make_frac_prevs(bools: rasp.SOp) -> rasp.SOp:\n \"\"\"Count the fraction of previous tokens where a specific condition was True.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n num_l = make_frac_prevs(rasp.tokens==\"l\")\n num_l(\"hello\")\n >> [0, 0, 1/3, 1/2, 2/5]\n\n Args:\n bools: SOp mapping a sequence to a sequence of booleans.\n\n Returns:\n frac_prevs: SOp mapping an input to a sequence, where every element\n is the fraction of previous \"True\" tokens.\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/65", "ground_truth": " bools = rasp.numerical(bools)\n prevs = rasp.Select(rasp.indices, rasp.indices, rasp.Comparison.LEQ)\n return rasp.numerical(rasp.Aggregate(prevs, bools,\n default=0)).named(\"frac_prevs\")\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "lib.py"], "context_start_lineno": 0, "lineno": 260, "function_name": "make_frac_prevs"}, "groundtruth": " bools = rasp.numerical(bools)\n prevs = rasp.Select(rasp.indices, rasp.indices, rasp.Comparison.LEQ)\n return rasp.numerical(rasp.Aggregate(prevs, bools,\n default=0)).named(\"frac_prevs\")\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP programs only using the subset of RASP supported by the compiler.\"\"\"\n\nfrom typing import List, Sequence\n\nfrom tracr.rasp import rasp\n\n### Programs that work only under non-causal evaluation.\n\n\ndef make_length() -> rasp.SOp:\n \"\"\"Creates the `length` SOp using selector width primitive.\n\n Example usage:\n length = make_length()\n length(\"abcdefg\")\n >> [7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0]\n\n Returns:\n length: SOp mapping an input to a sequence, where every element\n is the length of that sequence.\n \"\"\"\n all_true_selector = rasp.Select(\n rasp.tokens, rasp.tokens, rasp.Comparison.TRUE).named(\"all_true_selector\")\n return rasp.SelectorWidth(all_true_selector).named(\"length\")\n\n\nlength = make_length()\n\n\ndef make_reverse(sop: rasp.SOp) -> rasp.SOp:\n \"\"\"Create an SOp that reverses a sequence, using length primitive.\n\n Example usage:\n reverse = make_reverse(rasp.tokens)\n reverse(\"Hello\")\n >> ['o', 'l', 'l', 'e', 'H']\n\n Args:\n sop: an SOp\n\n Returns:\n reverse : SOp that reverses the input sequence.\n \"\"\"\n opp_idx = (length - rasp.indices).named(\"opp_idx\")\n opp_idx = (opp_idx - 1).named(\"opp_idx-1\")\n reverse_selector = rasp.Select(rasp.indices, opp_idx,\n rasp.Comparison.EQ).named(\"reverse_selector\")\n return rasp.Aggregate(reverse_selector, sop).named(\"reverse\")\n\n\ndef make_pair_balance(sop: rasp.SOp, open_token: str,\n close_token: str) -> rasp.SOp:\n \"\"\"Return fraction of previous open tokens minus the fraction of close tokens.\n\n (As implemented in the RASP paper.)\n\n If the outputs are always non-negative and end in 0, that implies the input\n has balanced parentheses.\n\n Example usage:\n num_l = make_pair_balance(rasp.tokens, \"(\", \")\")\n num_l(\"a()b(c))\")\n >> [0, 1/2, 0, 0, 1/5, 1/6, 0, -1/8]\n\n Args:\n sop: Input SOp.\n open_token: Token that counts positive.\n close_token: Token that counts negative.\n\n Returns:\n pair_balance: SOp mapping an input to a sequence, where every element\n is the fraction of previous open tokens minus previous close tokens.\n \"\"\"\n bools_open = rasp.numerical(sop == open_token).named(\"bools_open\")\n opens = rasp.numerical(make_frac_prevs(bools_open)).named(\"opens\")\n\n bools_close = rasp.numerical(sop == close_token).named(\"bools_close\")\n closes = rasp.numerical(make_frac_prevs(bools_close)).named(\"closes\")\n\n pair_balance = rasp.numerical(rasp.LinearSequenceMap(opens, closes, 1, -1))\n return pair_balance.named(\"pair_balance\")\n\n\ndef make_shuffle_dyck(pairs: List[str]) -> rasp.SOp:\n \"\"\"Returns 1 if a set of parentheses are balanced, 0 else.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n shuffle_dyck2 = make_shuffle_dyck(pairs=[\"()\", \"{}\"])\n shuffle_dyck2(\"({)}\")\n >> [1, 1, 1, 1]\n shuffle_dyck2(\"(){)}\")\n >> [0, 0, 0, 0, 0]\n\n Args:\n pairs: List of pairs of open and close 
tokens that each should be balanced.\n \"\"\"\n assert len(pairs) >= 1\n\n # Compute running balance of each type of parenthesis\n balances = []\n for pair in pairs:\n assert len(pair) == 2\n open_token, close_token = pair\n balance = make_pair_balance(\n rasp.tokens, open_token=open_token,\n close_token=close_token).named(f\"balance_{pair}\")\n balances.append(balance)\n\n # Check if balances where negative anywhere -> parentheses not balanced\n any_negative = balances[0] < 0\n for balance in balances[1:]:\n any_negative = any_negative | (balance < 0)\n\n # Convert to numerical SOp\n any_negative = rasp.numerical(rasp.Map(lambda x: x,\n any_negative)).named(\"any_negative\")\n\n select_all = rasp.Select(rasp.indices, rasp.indices,\n rasp.Comparison.TRUE).named(\"select_all\")\n has_neg = rasp.numerical(rasp.Aggregate(select_all, any_negative,\n default=0)).named(\"has_neg\")\n\n # Check if all balances are 0 at the end -> closed all parentheses\n all_zero = balances[0] == 0\n for balance in balances[1:]:\n all_zero = all_zero & (balance == 0)\n\n select_last = rasp.Select(rasp.indices, length - 1,\n rasp.Comparison.EQ).named(\"select_last\")\n last_zero = rasp.Aggregate(select_last, all_zero).named(\"last_zero\")\n\n not_has_neg = (~has_neg).named(\"not_has_neg\")\n return (last_zero & not_has_neg).named(\"shuffle_dyck\")\n\n\ndef make_shuffle_dyck2() -> rasp.SOp:\n return make_shuffle_dyck(pairs=[\"()\", \"{}\"]).named(\"shuffle_dyck2\")\n\n\ndef make_hist() -> rasp.SOp:\n \"\"\"Returns the number of times each token occurs in the input.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n hist = make_hist()\n hist(\"abac\")\n >> [2, 1, 2, 1]\n \"\"\"\n same_tok = rasp.Select(rasp.tokens, rasp.tokens,\n rasp.Comparison.EQ).named(\"same_tok\")\n return rasp.SelectorWidth(same_tok).named(\"hist\")\n\n\ndef make_sort_unique(vals: rasp.SOp, keys: rasp.SOp) -> rasp.SOp:\n \"\"\"Returns vals sorted by < relation on keys.\n\n Only supports unique keys.\n\n Example usage:\n sort = make_sort(rasp.tokens, rasp.tokens)\n sort([2, 4, 3, 1])\n >> [1, 2, 3, 4]\n\n Args:\n vals: Values to sort.\n keys: Keys for sorting.\n \"\"\"\n smaller = rasp.Select(keys, keys, rasp.Comparison.LT).named(\"smaller\")\n target_pos = rasp.SelectorWidth(smaller).named(\"target_pos\")\n sel_new = rasp.Select(target_pos, rasp.indices, rasp.Comparison.EQ)\n return rasp.Aggregate(sel_new, vals).named(\"sort\")\n\n\ndef make_sort(vals: rasp.SOp, keys: rasp.SOp, *, max_seq_len: int,\n min_key: float) -> rasp.SOp:\n \"\"\"Returns vals sorted by < relation on keys, which don't need to be unique.\n\n The implementation differs from the RASP paper, as it avoids using\n compositions of selectors to break ties. 
Instead, it uses the arguments\n max_seq_len and min_key to ensure the keys are unique.\n\n Note that this approach only works for numerical keys.\n\n Example usage:\n sort = make_sort(rasp.tokens, rasp.tokens, 5, 1)\n sort([2, 4, 3, 1])\n >> [1, 2, 3, 4]\n sort([2, 4, 1, 2])\n >> [1, 2, 2, 4]\n\n Args:\n vals: Values to sort.\n keys: Keys for sorting.\n max_seq_len: Maximum sequence length (used to ensure keys are unique)\n min_key: Minimum key value (used to ensure keys are unique)\n\n Returns:\n Output SOp of sort program.\n \"\"\"\n keys = rasp.SequenceMap(lambda x, i: x + min_key * i / max_seq_len, keys,\n rasp.indices)\n return make_sort_unique(vals, keys)\n\n\ndef make_sort_freq(max_seq_len: int) -> rasp.SOp:\n \"\"\"Returns tokens sorted by the frequency they appear in the input.\n\n Tokens the appear the same amount of times are output in the same order as in\n the input.\n\n Example usage:\n sort = make_sort_freq(rasp.tokens, rasp.tokens, 5)\n sort([2, 4, 2, 1])\n >> [2, 2, 4, 1]\n\n Args:\n max_seq_len: Maximum sequence length (used to ensure keys are unique)\n \"\"\"\n hist = -1 * make_hist().named(\"hist\")\n return make_sort(\n rasp.tokens, hist, max_seq_len=max_seq_len, min_key=1).named(\"sort_freq\")\n\n\n### Programs that work under both causal and regular evaluation.\n\n\ndef make_frac_prevs(bools: rasp.SOp) -> rasp.SOp:\n \"\"\"Count the fraction of previous tokens where a specific condition was True.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n num_l = make_frac_prevs(rasp.tokens==\"l\")\n num_l(\"hello\")\n >> [0, 0, 1/3, 1/2, 2/5]\n\n Args:\n bools: SOp mapping a sequence to a sequence of booleans.\n\n Returns:\n frac_prevs: SOp mapping an input to a sequence, where every element\n is the fraction of previous \"True\" tokens.\n \"\"\"\n bools = rasp.numerical(bools)\n prevs = rasp.Select(rasp.indices, rasp.indices, rasp.Comparison.LEQ)\n return rasp.numerical(rasp.Aggregate(prevs, bools,\n default=0)).named(\"frac_prevs\")\n\n\ndef shift_by(offset: int, /, sop: rasp.SOp) -> rasp.SOp:\n \"\"\"Returns the sop, shifted by `offset`, None-padded.\"\"\"", "metadata": {"task_id": "deepmind--tracr/66", "ground_truth": " select_off_by_offset = rasp.Select(rasp.indices, rasp.indices,\n lambda k, q: q == k + offset)\n out = rasp.Aggregate(select_off_by_offset, sop, default=None)\n return out.named(f\"shift_by({offset})\")\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "lib.py"], "context_start_lineno": 0, "lineno": 268, "function_name": "shift_by"}, "groundtruth": " select_off_by_offset = rasp.Select(rasp.indices, rasp.indices,\n lambda k, q: q == k + offset)\n out = rasp.Aggregate(select_off_by_offset, sop, default=None)\n return out.named(f\"shift_by({offset})\")\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP programs only using the subset of RASP supported by the compiler.\"\"\"\n\nfrom typing import List, Sequence\n\nfrom tracr.rasp import rasp\n\n### Programs that work only under non-causal evaluation.\n\n\ndef make_length() -> rasp.SOp:\n \"\"\"Creates the `length` SOp using selector width primitive.\n\n Example usage:\n length = make_length()\n length(\"abcdefg\")\n >> [7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0]\n\n Returns:\n length: SOp mapping an input to a sequence, where every element\n is the length of that sequence.\n \"\"\"\n all_true_selector = rasp.Select(\n rasp.tokens, rasp.tokens, rasp.Comparison.TRUE).named(\"all_true_selector\")\n return rasp.SelectorWidth(all_true_selector).named(\"length\")\n\n\nlength = make_length()\n\n\ndef make_reverse(sop: rasp.SOp) -> rasp.SOp:\n \"\"\"Create an SOp that reverses a sequence, using length primitive.\n\n Example usage:\n reverse = make_reverse(rasp.tokens)\n reverse(\"Hello\")\n >> ['o', 'l', 'l', 'e', 'H']\n\n Args:\n sop: an SOp\n\n Returns:\n reverse : SOp that reverses the input sequence.\n \"\"\"\n opp_idx = (length - rasp.indices).named(\"opp_idx\")\n opp_idx = (opp_idx - 1).named(\"opp_idx-1\")\n reverse_selector = rasp.Select(rasp.indices, opp_idx,\n rasp.Comparison.EQ).named(\"reverse_selector\")\n return rasp.Aggregate(reverse_selector, sop).named(\"reverse\")\n\n\ndef make_pair_balance(sop: rasp.SOp, open_token: str,\n close_token: str) -> rasp.SOp:\n \"\"\"Return fraction of previous open tokens minus the fraction of close tokens.\n\n (As implemented in the RASP paper.)\n\n If the outputs are always non-negative and end in 0, that implies the input\n has balanced parentheses.\n\n Example usage:\n num_l = make_pair_balance(rasp.tokens, \"(\", \")\")\n num_l(\"a()b(c))\")\n >> [0, 1/2, 0, 0, 1/5, 1/6, 0, -1/8]\n\n Args:\n sop: Input SOp.\n open_token: Token that counts positive.\n close_token: Token that counts negative.\n\n Returns:\n pair_balance: SOp mapping an input to a sequence, where every element\n is the fraction of previous open tokens minus previous close tokens.\n \"\"\"\n bools_open = rasp.numerical(sop == open_token).named(\"bools_open\")\n opens = rasp.numerical(make_frac_prevs(bools_open)).named(\"opens\")\n\n bools_close = rasp.numerical(sop == close_token).named(\"bools_close\")\n closes = rasp.numerical(make_frac_prevs(bools_close)).named(\"closes\")\n\n pair_balance = rasp.numerical(rasp.LinearSequenceMap(opens, closes, 1, -1))\n return pair_balance.named(\"pair_balance\")\n\n\ndef make_shuffle_dyck(pairs: List[str]) -> rasp.SOp:\n \"\"\"Returns 1 if a set of parentheses are balanced, 0 else.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n shuffle_dyck2 = make_shuffle_dyck(pairs=[\"()\", \"{}\"])\n shuffle_dyck2(\"({)}\")\n >> [1, 1, 1, 1]\n shuffle_dyck2(\"(){)}\")\n >> [0, 0, 0, 0, 0]\n\n Args:\n pairs: List of pairs of open and close 
tokens that each should be balanced.\n \"\"\"\n assert len(pairs) >= 1\n\n # Compute running balance of each type of parenthesis\n balances = []\n for pair in pairs:\n assert len(pair) == 2\n open_token, close_token = pair\n balance = make_pair_balance(\n rasp.tokens, open_token=open_token,\n close_token=close_token).named(f\"balance_{pair}\")\n balances.append(balance)\n\n # Check if balances where negative anywhere -> parentheses not balanced\n any_negative = balances[0] < 0\n for balance in balances[1:]:\n any_negative = any_negative | (balance < 0)\n\n # Convert to numerical SOp\n any_negative = rasp.numerical(rasp.Map(lambda x: x,\n any_negative)).named(\"any_negative\")\n\n select_all = rasp.Select(rasp.indices, rasp.indices,\n rasp.Comparison.TRUE).named(\"select_all\")\n has_neg = rasp.numerical(rasp.Aggregate(select_all, any_negative,\n default=0)).named(\"has_neg\")\n\n # Check if all balances are 0 at the end -> closed all parentheses\n all_zero = balances[0] == 0\n for balance in balances[1:]:\n all_zero = all_zero & (balance == 0)\n\n select_last = rasp.Select(rasp.indices, length - 1,\n rasp.Comparison.EQ).named(\"select_last\")\n last_zero = rasp.Aggregate(select_last, all_zero).named(\"last_zero\")\n\n not_has_neg = (~has_neg).named(\"not_has_neg\")\n return (last_zero & not_has_neg).named(\"shuffle_dyck\")\n\n\ndef make_shuffle_dyck2() -> rasp.SOp:\n return make_shuffle_dyck(pairs=[\"()\", \"{}\"]).named(\"shuffle_dyck2\")\n\n\ndef make_hist() -> rasp.SOp:\n \"\"\"Returns the number of times each token occurs in the input.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n hist = make_hist()\n hist(\"abac\")\n >> [2, 1, 2, 1]\n \"\"\"\n same_tok = rasp.Select(rasp.tokens, rasp.tokens,\n rasp.Comparison.EQ).named(\"same_tok\")\n return rasp.SelectorWidth(same_tok).named(\"hist\")\n\n\ndef make_sort_unique(vals: rasp.SOp, keys: rasp.SOp) -> rasp.SOp:\n \"\"\"Returns vals sorted by < relation on keys.\n\n Only supports unique keys.\n\n Example usage:\n sort = make_sort(rasp.tokens, rasp.tokens)\n sort([2, 4, 3, 1])\n >> [1, 2, 3, 4]\n\n Args:\n vals: Values to sort.\n keys: Keys for sorting.\n \"\"\"\n smaller = rasp.Select(keys, keys, rasp.Comparison.LT).named(\"smaller\")\n target_pos = rasp.SelectorWidth(smaller).named(\"target_pos\")\n sel_new = rasp.Select(target_pos, rasp.indices, rasp.Comparison.EQ)\n return rasp.Aggregate(sel_new, vals).named(\"sort\")\n\n\ndef make_sort(vals: rasp.SOp, keys: rasp.SOp, *, max_seq_len: int,\n min_key: float) -> rasp.SOp:\n \"\"\"Returns vals sorted by < relation on keys, which don't need to be unique.\n\n The implementation differs from the RASP paper, as it avoids using\n compositions of selectors to break ties. 
Instead, it uses the arguments\n max_seq_len and min_key to ensure the keys are unique.\n\n Note that this approach only works for numerical keys.\n\n Example usage:\n sort = make_sort(rasp.tokens, rasp.tokens, 5, 1)\n sort([2, 4, 3, 1])\n >> [1, 2, 3, 4]\n sort([2, 4, 1, 2])\n >> [1, 2, 2, 4]\n\n Args:\n vals: Values to sort.\n keys: Keys for sorting.\n max_seq_len: Maximum sequence length (used to ensure keys are unique)\n min_key: Minimum key value (used to ensure keys are unique)\n\n Returns:\n Output SOp of sort program.\n \"\"\"\n keys = rasp.SequenceMap(lambda x, i: x + min_key * i / max_seq_len, keys,\n rasp.indices)\n return make_sort_unique(vals, keys)\n\n\ndef make_sort_freq(max_seq_len: int) -> rasp.SOp:\n \"\"\"Returns tokens sorted by the frequency they appear in the input.\n\n Tokens the appear the same amount of times are output in the same order as in\n the input.\n\n Example usage:\n sort = make_sort_freq(rasp.tokens, rasp.tokens, 5)\n sort([2, 4, 2, 1])\n >> [2, 2, 4, 1]\n\n Args:\n max_seq_len: Maximum sequence length (used to ensure keys are unique)\n \"\"\"\n hist = -1 * make_hist().named(\"hist\")\n return make_sort(\n rasp.tokens, hist, max_seq_len=max_seq_len, min_key=1).named(\"sort_freq\")\n\n\n### Programs that work under both causal and regular evaluation.\n\n\ndef make_frac_prevs(bools: rasp.SOp) -> rasp.SOp:\n \"\"\"Count the fraction of previous tokens where a specific condition was True.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n num_l = make_frac_prevs(rasp.tokens==\"l\")\n num_l(\"hello\")\n >> [0, 0, 1/3, 1/2, 2/5]\n\n Args:\n bools: SOp mapping a sequence to a sequence of booleans.\n\n Returns:\n frac_prevs: SOp mapping an input to a sequence, where every element\n is the fraction of previous \"True\" tokens.\n \"\"\"\n bools = rasp.numerical(bools)\n prevs = rasp.Select(rasp.indices, rasp.indices, rasp.Comparison.LEQ)\n return rasp.numerical(rasp.Aggregate(prevs, bools,\n default=0)).named(\"frac_prevs\")\n\n\ndef shift_by(offset: int, /, sop: rasp.SOp) -> rasp.SOp:\n \"\"\"Returns the sop, shifted by `offset`, None-padded.\"\"\"\n select_off_by_offset = rasp.Select(rasp.indices, rasp.indices,\n lambda k, q: q == k + offset)\n out = rasp.Aggregate(select_off_by_offset, sop, default=None)\n return out.named(f\"shift_by({offset})\")\n\n\ndef detect_pattern(sop: rasp.SOp, pattern: Sequence[rasp.Value]) -> rasp.SOp:\n \"\"\"Returns an SOp which is True at the final element of the pattern.\n\n The first len(pattern) - 1 elements of the output SOp are None-padded.\n\n detect_pattern(tokens, \"abc\")(\"abcabc\") == [None, None, T, F, F, T]\n\n Args:\n sop: the SOp in which to look for patterns.\n pattern: a sequence of values to look for.\n\n Returns:\n a sop which detects the pattern.\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/67", "ground_truth": " if len(pattern) < 1:\n raise ValueError(f\"Length of `pattern` must be at least 1. 
Got {pattern}\")\n\n # detectors[i] will be a boolean-valued SOp which is true at position j iff\n # the i'th (from the end) element of the pattern was detected at position j-i.\n detectors = []\n for i, element in enumerate(reversed(pattern)):\n detector = sop == element\n if i != 0:\n detector = shift_by(i, detector)\n detectors.append(detector)\n\n # All that's left is to take the AND over all detectors.\n pattern_detected = detectors.pop()\n while detectors:\n pattern_detected = pattern_detected & detectors.pop()\n\n return pattern_detected.named(f\"detect_pattern({pattern})\")\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "lib.py"], "context_start_lineno": 0, "lineno": 289, "function_name": "detect_pattern"}, "groundtruth": " if len(pattern) < 1:\n raise ValueError(f\"Length of `pattern` must be at least 1. Got {pattern}\")\n\n # detectors[i] will be a boolean-valued SOp which is true at position j iff\n # the i'th (from the end) element of the pattern was detected at position j-i.\n detectors = []\n for i, element in enumerate(reversed(pattern)):\n detector = sop == element\n if i != 0:\n detector = shift_by(i, detector)\n detectors.append(detector)\n\n # All that's left is to take the AND over all detectors.\n pattern_detected = detectors.pop()\n while detectors:\n pattern_detected = pattern_detected & detectors.pop()\n\n return pattern_detected.named(f\"detect_pattern({pattern})\")\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP programs only using the subset of RASP supported by the compiler.\"\"\"\n\nfrom typing import List, Sequence\n\nfrom tracr.rasp import rasp\n\n### Programs that work only under non-causal evaluation.\n\n\ndef make_length() -> rasp.SOp:\n \"\"\"Creates the `length` SOp using selector width primitive.\n\n Example usage:\n length = make_length()\n length(\"abcdefg\")\n >> [7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0]\n\n Returns:\n length: SOp mapping an input to a sequence, where every element\n is the length of that sequence.\n \"\"\"\n all_true_selector = rasp.Select(\n rasp.tokens, rasp.tokens, rasp.Comparison.TRUE).named(\"all_true_selector\")\n return rasp.SelectorWidth(all_true_selector).named(\"length\")\n\n\nlength = make_length()\n\n\ndef make_reverse(sop: rasp.SOp) -> rasp.SOp:\n \"\"\"Create an SOp that reverses a sequence, using length primitive.\n\n Example usage:\n reverse = make_reverse(rasp.tokens)\n reverse(\"Hello\")\n >> ['o', 'l', 'l', 'e', 'H']\n\n Args:\n sop: an SOp\n\n Returns:\n reverse : SOp that reverses the input sequence.\n \"\"\"\n opp_idx = (length - rasp.indices).named(\"opp_idx\")\n opp_idx = (opp_idx - 1).named(\"opp_idx-1\")\n reverse_selector = rasp.Select(rasp.indices, opp_idx,\n rasp.Comparison.EQ).named(\"reverse_selector\")\n return rasp.Aggregate(reverse_selector, sop).named(\"reverse\")\n\n\ndef make_pair_balance(sop: rasp.SOp, 
open_token: str,\n close_token: str) -> rasp.SOp:\n \"\"\"Return fraction of previous open tokens minus the fraction of close tokens.\n\n (As implemented in the RASP paper.)\n\n If the outputs are always non-negative and end in 0, that implies the input\n has balanced parentheses.\n\n Example usage:\n num_l = make_pair_balance(rasp.tokens, \"(\", \")\")\n num_l(\"a()b(c))\")\n >> [0, 1/2, 0, 0, 1/5, 1/6, 0, -1/8]\n\n Args:\n sop: Input SOp.\n open_token: Token that counts positive.\n close_token: Token that counts negative.\n\n Returns:\n pair_balance: SOp mapping an input to a sequence, where every element\n is the fraction of previous open tokens minus previous close tokens.\n \"\"\"\n bools_open = rasp.numerical(sop == open_token).named(\"bools_open\")\n opens = rasp.numerical(make_frac_prevs(bools_open)).named(\"opens\")\n\n bools_close = rasp.numerical(sop == close_token).named(\"bools_close\")\n closes = rasp.numerical(make_frac_prevs(bools_close)).named(\"closes\")\n\n pair_balance = rasp.numerical(rasp.LinearSequenceMap(opens, closes, 1, -1))\n return pair_balance.named(\"pair_balance\")\n\n\ndef make_shuffle_dyck(pairs: List[str]) -> rasp.SOp:\n \"\"\"Returns 1 if a set of parentheses are balanced, 0 else.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n shuffle_dyck2 = make_shuffle_dyck(pairs=[\"()\", \"{}\"])\n shuffle_dyck2(\"({)}\")\n >> [1, 1, 1, 1]\n shuffle_dyck2(\"(){)}\")\n >> [0, 0, 0, 0, 0]\n\n Args:\n pairs: List of pairs of open and close tokens that each should be balanced.\n \"\"\"\n assert len(pairs) >= 1\n\n # Compute running balance of each type of parenthesis\n balances = []\n for pair in pairs:\n assert len(pair) == 2\n open_token, close_token = pair\n balance = make_pair_balance(\n rasp.tokens, open_token=open_token,\n close_token=close_token).named(f\"balance_{pair}\")\n balances.append(balance)\n\n # Check if balances where negative anywhere -> parentheses not balanced\n any_negative = balances[0] < 0\n for balance in balances[1:]:\n any_negative = any_negative | (balance < 0)\n\n # Convert to numerical SOp\n any_negative = rasp.numerical(rasp.Map(lambda x: x,\n any_negative)).named(\"any_negative\")\n\n select_all = rasp.Select(rasp.indices, rasp.indices,\n rasp.Comparison.TRUE).named(\"select_all\")\n has_neg = rasp.numerical(rasp.Aggregate(select_all, any_negative,\n default=0)).named(\"has_neg\")\n\n # Check if all balances are 0 at the end -> closed all parentheses\n all_zero = balances[0] == 0\n for balance in balances[1:]:\n all_zero = all_zero & (balance == 0)\n\n select_last = rasp.Select(rasp.indices, length - 1,\n rasp.Comparison.EQ).named(\"select_last\")\n last_zero = rasp.Aggregate(select_last, all_zero).named(\"last_zero\")\n\n not_has_neg = (~has_neg).named(\"not_has_neg\")\n return (last_zero & not_has_neg).named(\"shuffle_dyck\")\n\n\ndef make_shuffle_dyck2() -> rasp.SOp:\n return make_shuffle_dyck(pairs=[\"()\", \"{}\"]).named(\"shuffle_dyck2\")\n\n\ndef make_hist() -> rasp.SOp:\n \"\"\"Returns the number of times each token occurs in the input.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n hist = make_hist()\n hist(\"abac\")\n >> [2, 1, 2, 1]\n \"\"\"\n same_tok = rasp.Select(rasp.tokens, rasp.tokens,\n rasp.Comparison.EQ).named(\"same_tok\")\n return rasp.SelectorWidth(same_tok).named(\"hist\")\n\n\ndef make_sort_unique(vals: rasp.SOp, keys: rasp.SOp) -> rasp.SOp:\n \"\"\"Returns vals sorted by < relation on keys.\n\n Only supports unique keys.\n\n Example usage:\n sort = make_sort(rasp.tokens, rasp.tokens)\n 
sort([2, 4, 3, 1])\n >> [1, 2, 3, 4]\n\n Args:\n vals: Values to sort.\n keys: Keys for sorting.\n \"\"\"\n smaller = rasp.Select(keys, keys, rasp.Comparison.LT).named(\"smaller\")\n target_pos = rasp.SelectorWidth(smaller).named(\"target_pos\")\n sel_new = rasp.Select(target_pos, rasp.indices, rasp.Comparison.EQ)\n return rasp.Aggregate(sel_new, vals).named(\"sort\")\n\n\ndef make_sort(vals: rasp.SOp, keys: rasp.SOp, *, max_seq_len: int,\n min_key: float) -> rasp.SOp:\n \"\"\"Returns vals sorted by < relation on keys, which don't need to be unique.\n\n The implementation differs from the RASP paper, as it avoids using\n compositions of selectors to break ties. Instead, it uses the arguments\n max_seq_len and min_key to ensure the keys are unique.\n\n Note that this approach only works for numerical keys.\n\n Example usage:\n sort = make_sort(rasp.tokens, rasp.tokens, 5, 1)\n sort([2, 4, 3, 1])\n >> [1, 2, 3, 4]\n sort([2, 4, 1, 2])\n >> [1, 2, 2, 4]\n\n Args:\n vals: Values to sort.\n keys: Keys for sorting.\n max_seq_len: Maximum sequence length (used to ensure keys are unique)\n min_key: Minimum key value (used to ensure keys are unique)\n\n Returns:\n Output SOp of sort program.\n \"\"\"\n keys = rasp.SequenceMap(lambda x, i: x + min_key * i / max_seq_len, keys,\n rasp.indices)\n return make_sort_unique(vals, keys)\n\n\ndef make_sort_freq(max_seq_len: int) -> rasp.SOp:\n \"\"\"Returns tokens sorted by the frequency they appear in the input.\n\n Tokens the appear the same amount of times are output in the same order as in\n the input.\n\n Example usage:\n sort = make_sort_freq(rasp.tokens, rasp.tokens, 5)\n sort([2, 4, 2, 1])\n >> [2, 2, 4, 1]\n\n Args:\n max_seq_len: Maximum sequence length (used to ensure keys are unique)\n \"\"\"\n hist = -1 * make_hist().named(\"hist\")\n return make_sort(\n rasp.tokens, hist, max_seq_len=max_seq_len, min_key=1).named(\"sort_freq\")\n\n\n### Programs that work under both causal and regular evaluation.\n\n\ndef make_frac_prevs(bools: rasp.SOp) -> rasp.SOp:\n \"\"\"Count the fraction of previous tokens where a specific condition was True.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n num_l = make_frac_prevs(rasp.tokens==\"l\")\n num_l(\"hello\")\n >> [0, 0, 1/3, 1/2, 2/5]\n\n Args:\n bools: SOp mapping a sequence to a sequence of booleans.\n\n Returns:\n frac_prevs: SOp mapping an input to a sequence, where every element\n is the fraction of previous \"True\" tokens.\n \"\"\"\n bools = rasp.numerical(bools)\n prevs = rasp.Select(rasp.indices, rasp.indices, rasp.Comparison.LEQ)\n return rasp.numerical(rasp.Aggregate(prevs, bools,\n default=0)).named(\"frac_prevs\")\n\n\ndef shift_by(offset: int, /, sop: rasp.SOp) -> rasp.SOp:\n \"\"\"Returns the sop, shifted by `offset`, None-padded.\"\"\"\n select_off_by_offset = rasp.Select(rasp.indices, rasp.indices,\n lambda k, q: q == k + offset)\n out = rasp.Aggregate(select_off_by_offset, sop, default=None)\n return out.named(f\"shift_by({offset})\")\n\n\ndef detect_pattern(sop: rasp.SOp, pattern: Sequence[rasp.Value]) -> rasp.SOp:\n \"\"\"Returns an SOp which is True at the final element of the pattern.\n\n The first len(pattern) - 1 elements of the output SOp are None-padded.\n\n detect_pattern(tokens, \"abc\")(\"abcabc\") == [None, None, T, F, F, T]\n\n Args:\n sop: the SOp in which to look for patterns.\n pattern: a sequence of values to look for.\n\n Returns:\n a sop which detects the pattern.\n \"\"\"\n\n if len(pattern) < 1:\n raise ValueError(f\"Length of `pattern` must be at least 1. 
Got {pattern}\")\n\n # detectors[i] will be a boolean-valued SOp which is true at position j iff\n # the i'th (from the end) element of the pattern was detected at position j-i.\n detectors = []\n for i, element in enumerate(reversed(pattern)):\n detector = sop == element\n if i != 0:\n detector = shift_by(i, detector)\n detectors.append(detector)\n\n # All that's left is to take the AND over all detectors.\n pattern_detected = detectors.pop()\n while detectors:\n pattern_detected = pattern_detected & detectors.pop()\n\n return pattern_detected.named(f\"detect_pattern({pattern})\")\n\n\ndef make_count_less_freq(n: int) -> rasp.SOp:\n \"\"\"Returns how many tokens appear fewer than n times in the input.\n\n The output sequence contains this count in each position.\n\n Example usage:\n count_less_freq = make_count_less_freq(2)\n count_less_freq([\"a\", \"a\", \"a\", \"b\", \"b\", \"c\"])\n >> [3, 3, 3, 3, 3, 3]\n count_less_freq([\"a\", \"a\", \"c\", \"b\", \"b\", \"c\"])\n >> [6, 6, 6, 6, 6, 6]\n\n Args:\n n: Integer to compare token frequences to.\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/68", "ground_truth": " hist = make_hist().named(\"hist\")\n select_less = rasp.Select(hist, hist,\n lambda x, y: x <= n).named(\"select_less\")\n return rasp.SelectorWidth(select_less).named(\"count_less_freq\")\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "lib.py"], "context_start_lineno": 0, "lineno": 324, "function_name": "make_count_less_freq"}, "groundtruth": " hist = make_hist().named(\"hist\")\n select_less = rasp.Select(hist, hist,\n lambda x, y: x <= n).named(\"select_less\")\n return rasp.SelectorWidth(select_less).named(\"count_less_freq\")\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"RASP programs only using the subset of RASP supported by the compiler.\"\"\"\n\nfrom typing import List, Sequence\n\nfrom tracr.rasp import rasp\n\n### Programs that work only under non-causal evaluation.\n\n\ndef make_length() -> rasp.SOp:\n \"\"\"Creates the `length` SOp using selector width primitive.\n\n Example usage:\n length = make_length()\n length(\"abcdefg\")\n >> [7.0, 7.0, 7.0, 7.0, 7.0, 7.0, 7.0]\n\n Returns:\n length: SOp mapping an input to a sequence, where every element\n is the length of that sequence.\n \"\"\"\n all_true_selector = rasp.Select(\n rasp.tokens, rasp.tokens, rasp.Comparison.TRUE).named(\"all_true_selector\")\n return rasp.SelectorWidth(all_true_selector).named(\"length\")\n\n\nlength = make_length()\n\n\ndef make_reverse(sop: rasp.SOp) -> rasp.SOp:\n \"\"\"Create an SOp that reverses a sequence, using length primitive.\n\n Example usage:\n reverse = make_reverse(rasp.tokens)\n reverse(\"Hello\")\n >> ['o', 'l', 'l', 'e', 'H']\n\n Args:\n sop: an SOp\n\n Returns:\n reverse : SOp that reverses the input sequence.\n \"\"\"\n opp_idx = (length - rasp.indices).named(\"opp_idx\")\n opp_idx 
= (opp_idx - 1).named(\"opp_idx-1\")\n reverse_selector = rasp.Select(rasp.indices, opp_idx,\n rasp.Comparison.EQ).named(\"reverse_selector\")\n return rasp.Aggregate(reverse_selector, sop).named(\"reverse\")\n\n\ndef make_pair_balance(sop: rasp.SOp, open_token: str,\n close_token: str) -> rasp.SOp:\n \"\"\"Return fraction of previous open tokens minus the fraction of close tokens.\n\n (As implemented in the RASP paper.)\n\n If the outputs are always non-negative and end in 0, that implies the input\n has balanced parentheses.\n\n Example usage:\n num_l = make_pair_balance(rasp.tokens, \"(\", \")\")\n num_l(\"a()b(c))\")\n >> [0, 1/2, 0, 0, 1/5, 1/6, 0, -1/8]\n\n Args:\n sop: Input SOp.\n open_token: Token that counts positive.\n close_token: Token that counts negative.\n\n Returns:\n pair_balance: SOp mapping an input to a sequence, where every element\n is the fraction of previous open tokens minus previous close tokens.\n \"\"\"\n bools_open = rasp.numerical(sop == open_token).named(\"bools_open\")\n opens = rasp.numerical(make_frac_prevs(bools_open)).named(\"opens\")\n\n bools_close = rasp.numerical(sop == close_token).named(\"bools_close\")\n closes = rasp.numerical(make_frac_prevs(bools_close)).named(\"closes\")\n\n pair_balance = rasp.numerical(rasp.LinearSequenceMap(opens, closes, 1, -1))\n return pair_balance.named(\"pair_balance\")\n\n\ndef make_shuffle_dyck(pairs: List[str]) -> rasp.SOp:\n \"\"\"Returns 1 if a set of parentheses are balanced, 0 else.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n shuffle_dyck2 = make_shuffle_dyck(pairs=[\"()\", \"{}\"])\n shuffle_dyck2(\"({)}\")\n >> [1, 1, 1, 1]\n shuffle_dyck2(\"(){)}\")\n >> [0, 0, 0, 0, 0]\n\n Args:\n pairs: List of pairs of open and close tokens that each should be balanced.\n \"\"\"\n assert len(pairs) >= 1\n\n # Compute running balance of each type of parenthesis\n balances = []\n for pair in pairs:\n assert len(pair) == 2\n open_token, close_token = pair\n balance = make_pair_balance(\n rasp.tokens, open_token=open_token,\n close_token=close_token).named(f\"balance_{pair}\")\n balances.append(balance)\n\n # Check if balances where negative anywhere -> parentheses not balanced\n any_negative = balances[0] < 0\n for balance in balances[1:]:\n any_negative = any_negative | (balance < 0)\n\n # Convert to numerical SOp\n any_negative = rasp.numerical(rasp.Map(lambda x: x,\n any_negative)).named(\"any_negative\")\n\n select_all = rasp.Select(rasp.indices, rasp.indices,\n rasp.Comparison.TRUE).named(\"select_all\")\n has_neg = rasp.numerical(rasp.Aggregate(select_all, any_negative,\n default=0)).named(\"has_neg\")\n\n # Check if all balances are 0 at the end -> closed all parentheses\n all_zero = balances[0] == 0\n for balance in balances[1:]:\n all_zero = all_zero & (balance == 0)\n\n select_last = rasp.Select(rasp.indices, length - 1,\n rasp.Comparison.EQ).named(\"select_last\")\n last_zero = rasp.Aggregate(select_last, all_zero).named(\"last_zero\")\n\n not_has_neg = (~has_neg).named(\"not_has_neg\")\n return (last_zero & not_has_neg).named(\"shuffle_dyck\")\n\n\ndef make_shuffle_dyck2() -> rasp.SOp:\n return make_shuffle_dyck(pairs=[\"()\", \"{}\"]).named(\"shuffle_dyck2\")\n\n\ndef make_hist() -> rasp.SOp:\n \"\"\"Returns the number of times each token occurs in the input.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n hist = make_hist()\n hist(\"abac\")\n >> [2, 1, 2, 1]\n \"\"\"\n same_tok = rasp.Select(rasp.tokens, rasp.tokens,\n rasp.Comparison.EQ).named(\"same_tok\")\n return 
rasp.SelectorWidth(same_tok).named(\"hist\")\n\n\ndef make_sort_unique(vals: rasp.SOp, keys: rasp.SOp) -> rasp.SOp:\n \"\"\"Returns vals sorted by < relation on keys.\n\n Only supports unique keys.\n\n Example usage:\n sort = make_sort(rasp.tokens, rasp.tokens)\n sort([2, 4, 3, 1])\n >> [1, 2, 3, 4]\n\n Args:\n vals: Values to sort.\n keys: Keys for sorting.\n \"\"\"\n smaller = rasp.Select(keys, keys, rasp.Comparison.LT).named(\"smaller\")\n target_pos = rasp.SelectorWidth(smaller).named(\"target_pos\")\n sel_new = rasp.Select(target_pos, rasp.indices, rasp.Comparison.EQ)\n return rasp.Aggregate(sel_new, vals).named(\"sort\")\n\n\ndef make_sort(vals: rasp.SOp, keys: rasp.SOp, *, max_seq_len: int,\n min_key: float) -> rasp.SOp:\n \"\"\"Returns vals sorted by < relation on keys, which don't need to be unique.\n\n The implementation differs from the RASP paper, as it avoids using\n compositions of selectors to break ties. Instead, it uses the arguments\n max_seq_len and min_key to ensure the keys are unique.\n\n Note that this approach only works for numerical keys.\n\n Example usage:\n sort = make_sort(rasp.tokens, rasp.tokens, 5, 1)\n sort([2, 4, 3, 1])\n >> [1, 2, 3, 4]\n sort([2, 4, 1, 2])\n >> [1, 2, 2, 4]\n\n Args:\n vals: Values to sort.\n keys: Keys for sorting.\n max_seq_len: Maximum sequence length (used to ensure keys are unique)\n min_key: Minimum key value (used to ensure keys are unique)\n\n Returns:\n Output SOp of sort program.\n \"\"\"\n keys = rasp.SequenceMap(lambda x, i: x + min_key * i / max_seq_len, keys,\n rasp.indices)\n return make_sort_unique(vals, keys)\n\n\ndef make_sort_freq(max_seq_len: int) -> rasp.SOp:\n \"\"\"Returns tokens sorted by the frequency they appear in the input.\n\n Tokens the appear the same amount of times are output in the same order as in\n the input.\n\n Example usage:\n sort = make_sort_freq(rasp.tokens, rasp.tokens, 5)\n sort([2, 4, 2, 1])\n >> [2, 2, 4, 1]\n\n Args:\n max_seq_len: Maximum sequence length (used to ensure keys are unique)\n \"\"\"\n hist = -1 * make_hist().named(\"hist\")\n return make_sort(\n rasp.tokens, hist, max_seq_len=max_seq_len, min_key=1).named(\"sort_freq\")\n\n\n### Programs that work under both causal and regular evaluation.\n\n\ndef make_frac_prevs(bools: rasp.SOp) -> rasp.SOp:\n \"\"\"Count the fraction of previous tokens where a specific condition was True.\n\n (As implemented in the RASP paper.)\n\n Example usage:\n num_l = make_frac_prevs(rasp.tokens==\"l\")\n num_l(\"hello\")\n >> [0, 0, 1/3, 1/2, 2/5]\n\n Args:\n bools: SOp mapping a sequence to a sequence of booleans.\n\n Returns:\n frac_prevs: SOp mapping an input to a sequence, where every element\n is the fraction of previous \"True\" tokens.\n \"\"\"\n bools = rasp.numerical(bools)\n prevs = rasp.Select(rasp.indices, rasp.indices, rasp.Comparison.LEQ)\n return rasp.numerical(rasp.Aggregate(prevs, bools,\n default=0)).named(\"frac_prevs\")\n\n\ndef shift_by(offset: int, /, sop: rasp.SOp) -> rasp.SOp:\n \"\"\"Returns the sop, shifted by `offset`, None-padded.\"\"\"\n select_off_by_offset = rasp.Select(rasp.indices, rasp.indices,\n lambda k, q: q == k + offset)\n out = rasp.Aggregate(select_off_by_offset, sop, default=None)\n return out.named(f\"shift_by({offset})\")\n\n\ndef detect_pattern(sop: rasp.SOp, pattern: Sequence[rasp.Value]) -> rasp.SOp:\n \"\"\"Returns an SOp which is True at the final element of the pattern.\n\n The first len(pattern) - 1 elements of the output SOp are None-padded.\n\n detect_pattern(tokens, \"abc\")(\"abcabc\") == [None, 
None, T, F, F, T]\n\n Args:\n sop: the SOp in which to look for patterns.\n pattern: a sequence of values to look for.\n\n Returns:\n a sop which detects the pattern.\n \"\"\"\n\n if len(pattern) < 1:\n raise ValueError(f\"Length of `pattern` must be at least 1. Got {pattern}\")\n\n # detectors[i] will be a boolean-valued SOp which is true at position j iff\n # the i'th (from the end) element of the pattern was detected at position j-i.\n detectors = []\n for i, element in enumerate(reversed(pattern)):\n detector = sop == element\n if i != 0:\n detector = shift_by(i, detector)\n detectors.append(detector)\n\n # All that's left is to take the AND over all detectors.\n pattern_detected = detectors.pop()\n while detectors:\n pattern_detected = pattern_detected & detectors.pop()\n\n return pattern_detected.named(f\"detect_pattern({pattern})\")\n\n\ndef make_count_less_freq(n: int) -> rasp.SOp:\n \"\"\"Returns how many tokens appear fewer than n times in the input.\n\n The output sequence contains this count in each position.\n\n Example usage:\n count_less_freq = make_count_less_freq(2)\n count_less_freq([\"a\", \"a\", \"a\", \"b\", \"b\", \"c\"])\n >> [3, 3, 3, 3, 3, 3]\n count_less_freq([\"a\", \"a\", \"c\", \"b\", \"b\", \"c\"])\n >> [6, 6, 6, 6, 6, 6]\n\n Args:\n n: Integer to compare token frequences to.\n \"\"\"\n hist = make_hist().named(\"hist\")\n select_less = rasp.Select(hist, hist,\n lambda x, y: x <= n).named(\"select_less\")\n return rasp.SelectorWidth(select_less).named(\"count_less_freq\")\n\n\ndef make_count(sop, token):\n \"\"\"Returns the count of `token` in `sop`.\n\n The output sequence contains this count in each position.\n\n Example usage:\n count = make_count(tokens, \"a\")\n count([\"a\", \"a\", \"a\", \"b\", \"b\", \"c\"])\n >> [3, 3, 3, 3, 3, 3]\n count([\"c\", \"a\", \"b\", \"c\"])\n >> [1, 1, 1, 1]\n\n Args:\n sop: Sop to count tokens in.\n token: Token to count.\n \"\"\"\n return rasp.SelectorWidth(rasp.Select(\n sop, sop, lambda k, q: k == token)).named(f\"count_{token}\")\n\n\ndef make_nary_sequencemap(f, *sops):\n \"\"\"Returns an SOp that simulates an n-ary SequenceMap.\n\n Uses multiple binary SequenceMaps to convert n SOps x_1, x_2, ..., x_n\n into a single SOp arguments that takes n-tuples as value. The n-ary sequence\n map implementing f is then a Map on this resulting SOp.\n\n Note that the intermediate variables representing tuples of varying length\n will be encoded categorically, and can become very high-dimensional. So,\n using this function might lead to very large compiled models.\n\n Args:\n f: Function with n arguments.\n *sops: Sequence of SOps, one for each argument of f.\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/69", "ground_truth": " values, *sops = sops\n for sop in sops:\n # x is a single entry in the first iteration but a tuple in later iterations\n values = rasp.SequenceMap(\n lambda x, y: (*x, y) if isinstance(x, tuple) else (x, y), values, sop)\n return rasp.Map(lambda args: f(*args), values)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "lib.py"], "context_start_lineno": 0, "lineno": 365, "function_name": "make_nary_sequencemap"}, "groundtruth": " values, *sops = sops\n for sop in sops:\n # x is a single entry in the first iteration but a tuple in later iterations\n values = rasp.SequenceMap(\n lambda x, y: (*x, y) if isinstance(x, tuple) else (x, y), values, sop)\n return rasp.Map(lambda args: f(*args), values)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.assemble.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.compiler import assemble\nfrom tracr.craft import bases\n\n\nclass AssembleTest(parameterized.TestCase):\n\n def test_token_embedding_produces_correct_embedding(self):\n # Token embeddings should be one-hot embeddings of the input integers\n # into the token subspace of residual_space\n input_space = bases.VectorSpaceWithBasis.from_values(\"0inp\", range(2))\n indices_space = bases.VectorSpaceWithBasis.from_values(\"1ind\", range(3))\n output_space = bases.VectorSpaceWithBasis.from_values(\"2out\", range(2))\n residual_space = bases.join_vector_spaces(input_space, indices_space,\n output_space)\n\n @hk.without_apply_rng\n @hk.transform\n def token_pos_embed(tokens):", "metadata": {"task_id": "deepmind--tracr/70", "ground_truth": " embed_modules = assemble._make_embedding_modules(\n residual_space=residual_space,\n tokens_space=input_space,\n indices_space=indices_space,\n output_space=output_space)\n return embed_modules.token_embed(tokens)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "assemble_test.py"], "context_start_lineno": 0, "lineno": 40, "function_name": "token_pos_embed"}, "groundtruth": " embed_modules = assemble._make_embedding_modules(\n residual_space=residual_space,\n tokens_space=input_space,\n indices_space=indices_space,\n output_space=output_space)\n return embed_modules.token_embed(tokens)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.assemble.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.compiler import assemble\nfrom tracr.craft import bases\n\n\nclass AssembleTest(parameterized.TestCase):\n\n def test_token_embedding_produces_correct_embedding(self):\n # Token embeddings should be one-hot embeddings of the input integers\n # into the token subspace of residual_space\n input_space = bases.VectorSpaceWithBasis.from_values(\"0inp\", range(2))\n indices_space = bases.VectorSpaceWithBasis.from_values(\"1ind\", range(3))\n output_space = bases.VectorSpaceWithBasis.from_values(\"2out\", range(2))\n residual_space = bases.join_vector_spaces(input_space, indices_space,\n output_space)\n\n @hk.without_apply_rng\n @hk.transform\n def token_pos_embed(tokens):\n embed_modules = assemble._make_embedding_modules(\n residual_space=residual_space,\n tokens_space=input_space,\n indices_space=indices_space,\n output_space=output_space)\n return embed_modules.token_embed(tokens)\n\n tokens = jnp.array([0, 0, 1])\n expected_token_embeddings = jnp.array([[1, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0]])\n\n params = token_pos_embed.init(jax.random.PRNGKey(0), tokens)\n embeddings = token_pos_embed.apply(params, tokens)\n np.testing.assert_allclose(embeddings, expected_token_embeddings)\n\n def test_position_embedding_produces_correct_embedding(self):\n # Position embeddings should be one-hot embeddings of the input integers\n # (representing indices) into the indices subspace of residual_space\n input_space = bases.VectorSpaceWithBasis.from_values(\"0inp\", range(2))\n indices_space = bases.VectorSpaceWithBasis.from_values(\"1ind\", range(3))\n output_space = bases.VectorSpaceWithBasis.from_values(\"2out\", range(2))\n residual_space = bases.join_vector_spaces(input_space, indices_space,\n output_space)\n\n @hk.without_apply_rng\n @hk.transform\n def token_pos_embed(tokens):", "metadata": {"task_id": "deepmind--tracr/71", "ground_truth": " embed_modules = assemble._make_embedding_modules(\n residual_space=residual_space,\n tokens_space=input_space,\n indices_space=indices_space,\n output_space=output_space)\n return embed_modules.pos_embed(jnp.indices(tokens.shape)[-1])\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "assemble_test.py"], "context_start_lineno": 0, "lineno": 68, "function_name": "token_pos_embed"}, "groundtruth": " embed_modules = assemble._make_embedding_modules(\n residual_space=residual_space,\n tokens_space=input_space,\n indices_space=indices_space,\n output_space=output_space)\n return embed_modules.pos_embed(jnp.indices(tokens.shape)[-1])\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.assemble.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.compiler import assemble\nfrom tracr.craft import bases\n\n\nclass AssembleTest(parameterized.TestCase):\n\n def test_token_embedding_produces_correct_embedding(self):\n # Token embeddings should be one-hot embeddings of the input integers\n # into the token subspace of residual_space\n input_space = bases.VectorSpaceWithBasis.from_values(\"0inp\", range(2))\n indices_space = bases.VectorSpaceWithBasis.from_values(\"1ind\", range(3))\n output_space = bases.VectorSpaceWithBasis.from_values(\"2out\", range(2))\n residual_space = bases.join_vector_spaces(input_space, indices_space,\n output_space)\n\n @hk.without_apply_rng\n @hk.transform\n def token_pos_embed(tokens):\n embed_modules = assemble._make_embedding_modules(\n residual_space=residual_space,\n tokens_space=input_space,\n indices_space=indices_space,\n output_space=output_space)\n return embed_modules.token_embed(tokens)\n\n tokens = jnp.array([0, 0, 1])\n expected_token_embeddings = jnp.array([[1, 0, 0, 0, 0, 0, 0],\n [1, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0]])\n\n params = token_pos_embed.init(jax.random.PRNGKey(0), tokens)\n embeddings = token_pos_embed.apply(params, tokens)\n np.testing.assert_allclose(embeddings, expected_token_embeddings)\n\n def test_position_embedding_produces_correct_embedding(self):\n # Position embeddings should be one-hot embeddings of the input integers\n # (representing indices) into the indices subspace of residual_space\n input_space = bases.VectorSpaceWithBasis.from_values(\"0inp\", range(2))\n indices_space = bases.VectorSpaceWithBasis.from_values(\"1ind\", range(3))\n output_space = bases.VectorSpaceWithBasis.from_values(\"2out\", range(2))\n residual_space = bases.join_vector_spaces(input_space, indices_space,\n output_space)\n\n @hk.without_apply_rng\n @hk.transform\n def token_pos_embed(tokens):\n embed_modules = assemble._make_embedding_modules(\n residual_space=residual_space,\n tokens_space=input_space,\n indices_space=indices_space,\n output_space=output_space)\n return embed_modules.pos_embed(jnp.indices(tokens.shape)[-1])\n\n tokens = jnp.array([3, 0, 0, 1])\n expected_pos_embeddings = jnp.array([[0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0]])\n\n params = token_pos_embed.init(jax.random.PRNGKey(0), tokens)\n embeddings = token_pos_embed.apply(params, tokens)\n np.testing.assert_allclose(embeddings, expected_pos_embeddings)\n\n def test_unembedding(self):\n # Prepend numbers to preserve basis order [input, index, output]\n input_space = bases.VectorSpaceWithBasis.from_values(\"0inp\", range(2))\n indices_space = bases.VectorSpaceWithBasis.from_values(\"1ind\", range(3))\n 
output_space = bases.VectorSpaceWithBasis.from_values(\"2out\", range(2))\n residual_space = bases.join_vector_spaces(input_space, indices_space,\n output_space)\n\n @hk.without_apply_rng\n @hk.transform\n def unembed(embeddings):", "metadata": {"task_id": "deepmind--tracr/72", "ground_truth": " embed_modules = assemble._make_embedding_modules(\n residual_space=residual_space,\n tokens_space=input_space,\n indices_space=indices_space,\n output_space=output_space)\n return embed_modules.unembed(embeddings, use_unembed_argmax=True)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "compiler", "assemble_test.py"], "context_start_lineno": 0, "lineno": 96, "function_name": "unembed"}, "groundtruth": " embed_modules = assemble._make_embedding_modules(\n residual_space=residual_space,\n tokens_space=input_space,\n indices_space=indices_space,\n output_space=output_space)\n return embed_modules.unembed(embeddings, use_unembed_argmax=True)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Attention head for categorical inputs.\"\"\"\n\nfrom typing import Optional\n\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\nfrom typing_extensions import Protocol\n\n\nclass QueryKeyToAttnLogit(Protocol):\n\n def __call__(self, query: bases.BasisDirection,\n key: bases.BasisDirection) -> bool:\n pass\n\n\ndef categorical_attn(\n query_space: bases.VectorSpaceWithBasis,\n key_space: bases.VectorSpaceWithBasis,\n value_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n bos_space: bases.VectorSpaceWithBasis,\n one_space: bases.VectorSpaceWithBasis,\n attn_fn: QueryKeyToAttnLogit,\n default_output: Optional[bases.VectorInBasis] = None,\n causal: bool = False,\n always_attend_to_bos: bool = False,\n use_bos_for_default_output: bool = True,\n softmax_coldness: float = 100.,\n) -> transformers.AttentionHead:\n \"\"\"Returns an attention head for categorical inputs.\n\n Assumes the existence of a beginning of sequence token and attends to it\n always with strength 0.5*softmax_coldness. 
This allows to implement an\n arbitrary default value for rows in the attention pattern that are all-zero.\n\n Attends to the BOS token if all other key-query pairs have zero attention.\n Hence, the first value in the value sequence will be the default output for\n such cases.\n\n Args:\n query_space: Vector space containing (categorical) query input.\n key_space: Vector space containing (categorical) key input.\n value_space: Vector space containing (numerical) value input.\n output_space: Vector space which will contain (numerical) output.\n bos_space: 1-d space used to identify the beginning of sequence token.\n one_space: 1-d space which contains 1 at every position.\n attn_fn: A selector function f(query, key) operating on the query/key basis\n directions that defines the attention pattern.\n default_output: Output to return if attention pattern is all zero.\n causal: If True, use masked attention.\n always_attend_to_bos: If True, always attend to the BOS token. If False,\n only attend to BOS when attending to nothing else.\n use_bos_for_default_output: If True, assume BOS is not in the value space\n and output a default value when attending to BOS. If False, assume BOS is\n in the value space, and map it to the output space like any other token.\n softmax_coldness: The inverse temperature of the softmax. Default value is\n high which makes the attention close to a hard maximum.\n \"\"\"\n bases.ensure_dims(bos_space, num_dims=1, name=\"bos_space\")\n bases.ensure_dims(one_space, num_dims=1, name=\"one_space\")\n bos_direction = bos_space.basis[0]\n one_direction = one_space.basis[0]\n\n # Add bos direction to query, key, and value spaces in case it is missing\n query_space = bases.join_vector_spaces(query_space, bos_space, one_space)\n key_space = bases.join_vector_spaces(key_space, bos_space)\n value_space = bases.join_vector_spaces(value_space, bos_space)\n\n if always_attend_to_bos:\n value_basis = value_space.basis\n else:\n value_basis = [v for v in value_space.basis if v != bos_direction]\n assert len(value_basis) == output_space.num_dims\n value_to_output = dict(zip(value_basis, output_space.basis))\n\n if default_output is None:\n default_output = output_space.null_vector()\n assert default_output in output_space\n\n def qk_fun(query: bases.BasisDirection, key: bases.BasisDirection) -> float:\n\n # We want to enforce the following property on our attention patterns:\n # - if nothing else is attended to, attend to the BOS token.\n # - otherwise, don't attend to the BOS token.\n #\n # We assume that the BOS position always only contains the vector bos + one,\n # and that any other position has bos coefficient 0.\n #\n # We do this as follows:\n # Let Q and K be subspaces of V containing the query and key vectors,\n # both disjoint with the BOS space {bos} or the one space {one}.\n # Suppose we have an attn_fn which defines a bilinear W_QK: V x V -> \u211d,\n # s.t. 
W_QK(q, k) = 0 whenever either q or k are bos or one.\n #\n # Then define W_new: V x V -> \u211d st:\n # W_new(one, bos) = 0.5, otherwise 0.\n #\n # Now set W_QK' = W_QK + W_new.\n #\n # To evaluate the attention to the BOS position:\n # W_QK'(q, bos + one)\n # = W_QK'(q, bos) + W_QK'(q, one)\n # = W_QK(q, bos) + W_QK(q, one) + W_new(q, bos) + W_new(q, one)\n # = 0 + 0 + W_new(q, bos) + W_new(q, one)\n # = W_new(q, bos) + W_new(q, one)\n # = W_new(q' + one, bos) + W_new(q' + one, one) where q = one + q'\n # = W_new(q', bos) + W_new(one, bos) + W_new(q', one) + W_new(one, one)\n # = 0 + 0.5 + 0 + 0\n # = 0.5\n #\n # To evaluate the attention to a non-BOS position:\n # W_QK'(0 * bos + q, 0 * bos + k) # s.t. q \u2208 Q+{one}, k \u2208 K+{one}\n # = 0*W_QK'(bos, 0*bos + k) + W_QK'(q, 0*bos + k)\n # = W_QK'(q, 0*bos + k)\n # = 0*W_QK'(q, bos) + W_QK'(q, k)\n # = W_QK'(q, k)\n # = W_QK(q, k) since W_QK' = W_QK on inputs not containing bos.\n # = W_QK(q', k') since W_QK(x, y) = 0 whenever x or y are one.\n #\n # Since W_QK(q, k) takes values in 0, 1, a sufficiently high softmax\n # coldness will give us the desired property. QED\n #\n # The following implements this idea.\n # By replacing 0.5 with 1, we can instead enforce a different property: that\n # the BOS token is always attended to in addition to whatever else.", "metadata": {"task_id": "deepmind--tracr/73", "ground_truth": " if key == bos_direction and query == one_direction:\n c = 1. if always_attend_to_bos else 0.5\n return c * softmax_coldness\n elif {key, query}.intersection({one_direction, bos_direction}):\n return 0\n\n return softmax_coldness * attn_fn(query, key)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "chamber", "categorical_attn.py"], "context_start_lineno": 0, "lineno": 142, "function_name": "qk_fun"}, "groundtruth": " if key == bos_direction and query == one_direction:\n c = 1. if always_attend_to_bos else 0.5\n return c * softmax_coldness\n elif {key, query}.intersection({one_direction, bos_direction}):\n return 0\n\n return softmax_coldness * attn_fn(query, key)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Attention head for categorical inputs.\"\"\"\n\nfrom typing import Optional\n\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\nfrom typing_extensions import Protocol\n\n\nclass QueryKeyToAttnLogit(Protocol):\n\n def __call__(self, query: bases.BasisDirection,\n key: bases.BasisDirection) -> bool:\n pass\n\n\ndef categorical_attn(\n query_space: bases.VectorSpaceWithBasis,\n key_space: bases.VectorSpaceWithBasis,\n value_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n bos_space: bases.VectorSpaceWithBasis,\n one_space: bases.VectorSpaceWithBasis,\n attn_fn: QueryKeyToAttnLogit,\n default_output: Optional[bases.VectorInBasis] = None,\n causal: bool = False,\n always_attend_to_bos: bool = False,\n use_bos_for_default_output: bool = True,\n softmax_coldness: float = 100.,\n) -> transformers.AttentionHead:\n \"\"\"Returns an attention head for categorical inputs.\n\n Assumes the existence of a beginning of sequence token and attends to it\n always with strength 0.5*softmax_coldness. This allows to implement an\n arbitrary default value for rows in the attention pattern that are all-zero.\n\n Attends to the BOS token if all other key-query pairs have zero attention.\n Hence, the first value in the value sequence will be the default output for\n such cases.\n\n Args:\n query_space: Vector space containing (categorical) query input.\n key_space: Vector space containing (categorical) key input.\n value_space: Vector space containing (numerical) value input.\n output_space: Vector space which will contain (numerical) output.\n bos_space: 1-d space used to identify the beginning of sequence token.\n one_space: 1-d space which contains 1 at every position.\n attn_fn: A selector function f(query, key) operating on the query/key basis\n directions that defines the attention pattern.\n default_output: Output to return if attention pattern is all zero.\n causal: If True, use masked attention.\n always_attend_to_bos: If True, always attend to the BOS token. If False,\n only attend to BOS when attending to nothing else.\n use_bos_for_default_output: If True, assume BOS is not in the value space\n and output a default value when attending to BOS. If False, assume BOS is\n in the value space, and map it to the output space like any other token.\n softmax_coldness: The inverse temperature of the softmax. 
Default value is\n high which makes the attention close to a hard maximum.\n \"\"\"\n bases.ensure_dims(bos_space, num_dims=1, name=\"bos_space\")\n bases.ensure_dims(one_space, num_dims=1, name=\"one_space\")\n bos_direction = bos_space.basis[0]\n one_direction = one_space.basis[0]\n\n # Add bos direction to query, key, and value spaces in case it is missing\n query_space = bases.join_vector_spaces(query_space, bos_space, one_space)\n key_space = bases.join_vector_spaces(key_space, bos_space)\n value_space = bases.join_vector_spaces(value_space, bos_space)\n\n if always_attend_to_bos:\n value_basis = value_space.basis\n else:\n value_basis = [v for v in value_space.basis if v != bos_direction]\n assert len(value_basis) == output_space.num_dims\n value_to_output = dict(zip(value_basis, output_space.basis))\n\n if default_output is None:\n default_output = output_space.null_vector()\n assert default_output in output_space\n\n def qk_fun(query: bases.BasisDirection, key: bases.BasisDirection) -> float:\n\n # We want to enforce the following property on our attention patterns:\n # - if nothing else is attended to, attend to the BOS token.\n # - otherwise, don't attend to the BOS token.\n #\n # We assume that the BOS position always only contains the vector bos + one,\n # and that any other position has bos coefficient 0.\n #\n # We do this as follows:\n # Let Q and K be subspaces of V containing the query and key vectors,\n # both disjoint with the BOS space {bos} or the one space {one}.\n # Suppose we have an attn_fn which defines a bilinear W_QK: V x V -> \u211d,\n # s.t. W_QK(q, k) = 0 whenever either q or k are bos or one.\n #\n # Then define W_new: V x V -> \u211d st:\n # W_new(one, bos) = 0.5, otherwise 0.\n #\n # Now set W_QK' = W_QK + W_new.\n #\n # To evaluate the attention to the BOS position:\n # W_QK'(q, bos + one)\n # = W_QK'(q, bos) + W_QK'(q, one)\n # = W_QK(q, bos) + W_QK(q, one) + W_new(q, bos) + W_new(q, one)\n # = 0 + 0 + W_new(q, bos) + W_new(q, one)\n # = W_new(q, bos) + W_new(q, one)\n # = W_new(q' + one, bos) + W_new(q' + one, one) where q = one + q'\n # = W_new(q', bos) + W_new(one, bos) + W_new(q', one) + W_new(one, one)\n # = 0 + 0.5 + 0 + 0\n # = 0.5\n #\n # To evaluate the attention to a non-BOS position:\n # W_QK'(0 * bos + q, 0 * bos + k) # s.t. q \u2208 Q+{one}, k \u2208 K+{one}\n # = 0*W_QK'(bos, 0*bos + k) + W_QK'(q, 0*bos + k)\n # = W_QK'(q, 0*bos + k)\n # = 0*W_QK'(q, bos) + W_QK'(q, k)\n # = W_QK'(q, k)\n # = W_QK(q, k) since W_QK' = W_QK on inputs not containing bos.\n # = W_QK(q', k') since W_QK(x, y) = 0 whenever x or y are one.\n #\n # Since W_QK(q, k) takes values in 0, 1, a sufficiently high softmax\n # coldness will give us the desired property. QED\n #\n # The following implements this idea.\n # By replacing 0.5 with 1, we can instead enforce a different property: that\n # the BOS token is always attended to in addition to whatever else.\n\n if key == bos_direction and query == one_direction:\n c = 1. 
if always_attend_to_bos else 0.5\n return c * softmax_coldness\n elif {key, query}.intersection({one_direction, bos_direction}):\n return 0\n\n return softmax_coldness * attn_fn(query, key)\n\n w_qk = vectorspace_fns.ScalarBilinear.from_action(\n query_space,\n key_space,\n qk_fun,\n )\n\n def ov_fun(input_dir: bases.BasisDirection) -> bases.VectorInBasis:", "metadata": {"task_id": "deepmind--tracr/74", "ground_truth": " if use_bos_for_default_output and input_dir == bos_direction:\n return default_output\n return output_space.vector_from_basis_direction(value_to_output[input_dir])\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "chamber", "categorical_attn.py"], "context_start_lineno": 0, "lineno": 157, "function_name": "ov_fun"}, "groundtruth": " if use_bos_for_default_output and input_dir == bos_direction:\n return default_output\n return output_space.vector_from_basis_direction(value_to_output[input_dir])\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"MLPs to compute arbitrary numerical functions by discretising.\"\"\"\n\nimport dataclasses\n\nfrom typing import Callable, Iterable, List\n\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\nfrom tracr.utils import errors\n\n\n@dataclasses.dataclass\nclass DiscretisingLayerMaterials:\n \"\"\"Provides components for a hidden layer that discretises the input.\n\n Attributes:\n action: Function acting on basis directions that defines the computation.\n hidden_space: Vector space of the hidden representation of the layer.\n output_values: Set of output values that correspond to the discretisation.\n \"\"\"\n action: Callable[[bases.BasisDirection], bases.VectorInBasis]\n hidden_space: bases.VectorSpaceWithBasis\n output_values: List[float]\n\n\ndef _get_discretising_layer(input_value_set: Iterable[float],\n f: Callable[[float],\n float], hidden_name: bases.Name,\n one_direction: bases.BasisDirection,\n large_number: float) -> DiscretisingLayerMaterials:\n \"\"\"Creates a hidden layer that discretises the input of f(x) into a value set.\n\n The input is split up into a distinct region around each value in\n `input_value_set`:\n\n elements of value set: v0 | v1 | v2 | v3 | v4 | ...\n thresholds: t0 t1 t2 t3 t4\n\n The hidden layer has two activations per threshold:\n hidden_k_1 = ReLU(L * (x - threshold[k]) + 1)\n hidden_k_2 = ReLU(L * (x - threshold[k]))\n\n Note that hidden_k_1 - hidden_k_2 is:\n 1 if x >= threshold[k] + 1/L\n 0 if x <= threshold[k]\n between 0 and 1 if threshold[k] < x < threshold[k] + 1/L\n\n So as long as we choose L a big enough number, we have\n hidden_k_1 - hidden_k_2 = 1 if x >= threshold[k].\n i.e. 
we know in which region the input value is.\n\n Args:\n input_value_set: Set of discrete input values.\n f: Function to approximate.\n hidden_name: Name for hidden dimensions.\n one_direction: Auxiliary dimension that must contain 1 in the input.\n large_number: Large number L that determines accuracy of the computation.\n\n Returns:\n DiscretisingLayerMaterials containing all components for the layer.\n \"\"\"\n output_values, sorted_values = [], []\n for x in sorted(input_value_set):\n res = errors.ignoring_arithmetic_errors(f)(x)\n if res is not None:\n output_values.append(res)\n sorted_values.append(x)\n\n num_vals = len(sorted_values)\n value_thresholds = [\n (sorted_values[i] + sorted_values[i + 1]) / 2 for i in range(num_vals - 1)\n ]\n\n hidden_directions = [bases.BasisDirection(f\"{hidden_name}start\")]\n for k in range(1, num_vals):\n dir0 = bases.BasisDirection(hidden_name, (k, 0))\n dir1 = bases.BasisDirection(hidden_name, (k, 1))\n hidden_directions.extend([dir0, dir1])\n hidden_space = bases.VectorSpaceWithBasis(hidden_directions)\n\n def action(direction: bases.BasisDirection) -> bases.VectorInBasis:\n # hidden_k_0 = ReLU(L * (x - threshold[k]) + 1)\n # hidden_k_1 = ReLU(L * (x - threshold[k]))", "metadata": {"task_id": "deepmind--tracr/75", "ground_truth": " if direction == one_direction:\n hidden = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(f\"{hidden_name}start\"))\n else:\n hidden = hidden_space.null_vector()\n for k in range(1, num_vals):\n vec0 = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(hidden_name, (k, 0)))\n vec1 = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(hidden_name, (k, 1)))\n if direction == one_direction:\n hidden += (1 - large_number * value_thresholds[k - 1]) * vec0\n hidden -= large_number * value_thresholds[k - 1] * vec1\n else:\n hidden += large_number * vec0 + large_number * vec1\n return hidden\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "chamber", "numerical_mlp.py"], "context_start_lineno": 0, "lineno": 98, "function_name": "action"}, "groundtruth": " if direction == one_direction:\n hidden = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(f\"{hidden_name}start\"))\n else:\n hidden = hidden_space.null_vector()\n for k in range(1, num_vals):\n vec0 = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(hidden_name, (k, 0)))\n vec1 = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(hidden_name, (k, 1)))\n if direction == one_direction:\n hidden += (1 - large_number * value_thresholds[k - 1]) * vec0\n hidden -= large_number * value_thresholds[k - 1] * vec1\n else:\n hidden += large_number * vec0 + large_number * vec1\n return hidden\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"MLPs to compute arbitrary numerical functions by discretising.\"\"\"\n\nimport dataclasses\n\nfrom typing import Callable, Iterable, List\n\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\nfrom tracr.utils import errors\n\n\n@dataclasses.dataclass\nclass DiscretisingLayerMaterials:\n \"\"\"Provides components for a hidden layer that discretises the input.\n\n Attributes:\n action: Function acting on basis directions that defines the computation.\n hidden_space: Vector space of the hidden representation of the layer.\n output_values: Set of output values that correspond to the discretisation.\n \"\"\"\n action: Callable[[bases.BasisDirection], bases.VectorInBasis]\n hidden_space: bases.VectorSpaceWithBasis\n output_values: List[float]\n\n\ndef _get_discretising_layer(input_value_set: Iterable[float],\n f: Callable[[float],\n float], hidden_name: bases.Name,\n one_direction: bases.BasisDirection,\n large_number: float) -> DiscretisingLayerMaterials:\n \"\"\"Creates a hidden layer that discretises the input of f(x) into a value set.\n\n The input is split up into a distinct region around each value in\n `input_value_set`:\n\n elements of value set: v0 | v1 | v2 | v3 | v4 | ...\n thresholds: t0 t1 t2 t3 t4\n\n The hidden layer has two activations per threshold:\n hidden_k_1 = ReLU(L * (x - threshold[k]) + 1)\n hidden_k_2 = ReLU(L * (x - threshold[k]))\n\n Note that hidden_k_1 - hidden_k_2 is:\n 1 if x >= threshold[k] + 1/L\n 0 if x <= threshold[k]\n between 0 and 1 if threshold[k] < x < threshold[k] + 1/L\n\n So as long as we choose L a big enough number, we have\n hidden_k_1 - hidden_k_2 = 1 if x >= threshold[k].\n i.e. 
we know in which region the input value is.\n\n Args:\n input_value_set: Set of discrete input values.\n f: Function to approximate.\n hidden_name: Name for hidden dimensions.\n one_direction: Auxiliary dimension that must contain 1 in the input.\n large_number: Large number L that determines accuracy of the computation.\n\n Returns:\n DiscretisingLayerMaterials containing all components for the layer.\n \"\"\"\n output_values, sorted_values = [], []\n for x in sorted(input_value_set):\n res = errors.ignoring_arithmetic_errors(f)(x)\n if res is not None:\n output_values.append(res)\n sorted_values.append(x)\n\n num_vals = len(sorted_values)\n value_thresholds = [\n (sorted_values[i] + sorted_values[i + 1]) / 2 for i in range(num_vals - 1)\n ]\n\n hidden_directions = [bases.BasisDirection(f\"{hidden_name}start\")]\n for k in range(1, num_vals):\n dir0 = bases.BasisDirection(hidden_name, (k, 0))\n dir1 = bases.BasisDirection(hidden_name, (k, 1))\n hidden_directions.extend([dir0, dir1])\n hidden_space = bases.VectorSpaceWithBasis(hidden_directions)\n\n def action(direction: bases.BasisDirection) -> bases.VectorInBasis:\n # hidden_k_0 = ReLU(L * (x - threshold[k]) + 1)\n # hidden_k_1 = ReLU(L * (x - threshold[k]))\n if direction == one_direction:\n hidden = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(f\"{hidden_name}start\"))\n else:\n hidden = hidden_space.null_vector()\n for k in range(1, num_vals):\n vec0 = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(hidden_name, (k, 0)))\n vec1 = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(hidden_name, (k, 1)))\n if direction == one_direction:\n hidden += (1 - large_number * value_thresholds[k - 1]) * vec0\n hidden -= large_number * value_thresholds[k - 1] * vec1\n else:\n hidden += large_number * vec0 + large_number * vec1\n return hidden\n\n return DiscretisingLayerMaterials(\n action=action, hidden_space=hidden_space, output_values=output_values)\n\n\ndef map_numerical_mlp(\n f: Callable[[float], float],\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n input_value_set: Iterable[float],\n one_space: bases.VectorSpaceWithBasis,\n large_number: float = 100,\n hidden_name: bases.Name = \"__hidden__\",\n) -> transformers.MLP:\n \"\"\"Returns an MLP that encodes any function of a single variable f(x).\n\n This is implemented by discretising the input according to input_value_set\n and defining thresholds that determine which part of the input range will\n is allocated to which value in input_value_set.\n\n elements of value set: v0 | v1 | v2 | v3 | v4 | ...\n thresholds: t0 t1 t2 t3 t4\n\n The MLP computes two hidden activations per threshold:\n hidden_k_0 = ReLU(L * (x - threshold[k]) + 1)\n hidden_k_1 = ReLU(L * (x - threshold[k]))\n\n Note that hidden_k_1 - hidden_k_2 is:\n 1 if x >= threshold[k] + 1/L\n 0 if x <= threshold[k]\n between 0 and 1 if threshold[k] < x < threshold[k] + 1/L\n\n So as long as we choose L a big enough number, we have\n hidden_k_0 - hidden_k_1 = 1 if x >= threshold[k].\n\n The MLP then computes the output as:\n output = f(input[0]) +\n sum((hidden_k_0 - hidden_k_1) * (f(input[k]) - f(input[k-1]))\n for all k=0,1,...)\n\n This sum will be (by a telescoping sums argument)\n f(input[0]) if x <= threshold[0]\n f(input[k]) if threshold[k-1] < x <= threshold[k] for some other k\n f(input[-1]) if x > threshold[-1]\n which approximates f() up to an accuracy given by input_value_set and L.\n\n Args:\n f: Function to approximate.\n 
input_space: 1-d vector space that encodes the input x.\n output_space: 1-d vector space to write the output to.\n input_value_set: Set of values the input can take.\n one_space: Auxiliary 1-d vector space that must contain 1 in the input.\n large_number: Large number L that determines accuracy of the computation.\n Note that too large values of L can lead to numerical issues, particularly\n during inference on GPU/TPU.\n hidden_name: Name for hidden dimensions.\n \"\"\"\n bases.ensure_dims(input_space, num_dims=1, name=\"input_space\")\n bases.ensure_dims(output_space, num_dims=1, name=\"output_space\")\n bases.ensure_dims(one_space, num_dims=1, name=\"one_space\")\n\n input_space = bases.join_vector_spaces(input_space, one_space)\n out_vec = output_space.vector_from_basis_direction(output_space.basis[0])\n\n discretising_layer = _get_discretising_layer(\n input_value_set=input_value_set,\n f=f,\n hidden_name=hidden_name,\n one_direction=one_space.basis[0],\n large_number=large_number)\n first_layer = vectorspace_fns.Linear.from_action(\n input_space, discretising_layer.hidden_space, discretising_layer.action)\n\n def second_layer_action(\n direction: bases.BasisDirection) -> bases.VectorInBasis:\n # output = sum(\n # (hidden_k_0 - hidden_k_1) * (f(input[k]) - f(input[k-1]))\n # for all k)", "metadata": {"task_id": "deepmind--tracr/76", "ground_truth": " if direction.name == f\"{hidden_name}start\":\n return discretising_layer.output_values[0] * out_vec\n k, i = direction.value\n # add hidden_k_0 and subtract hidden_k_1\n sign = {0: 1, 1: -1}[i]\n return sign * (discretising_layer.output_values[k] -\n discretising_layer.output_values[k - 1]) * out_vec\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "chamber", "numerical_mlp.py"], "context_start_lineno": 0, "lineno": 192, "function_name": "second_layer_action"}, "groundtruth": " if direction.name == f\"{hidden_name}start\":\n return discretising_layer.output_values[0] * out_vec\n k, i = direction.value\n # add hidden_k_0 and subtract hidden_k_1\n sign = {0: 1, 1: -1}[i]\n return sign * (discretising_layer.output_values[k] -\n discretising_layer.output_values[k - 1]) * out_vec\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"MLPs to compute arbitrary numerical functions by discretising.\"\"\"\n\nimport dataclasses\n\nfrom typing import Callable, Iterable, List\n\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\nfrom tracr.utils import errors\n\n\n@dataclasses.dataclass\nclass DiscretisingLayerMaterials:\n \"\"\"Provides components for a hidden layer that discretises the input.\n\n Attributes:\n action: Function acting on basis directions that defines the computation.\n hidden_space: Vector space of the hidden representation of the layer.\n output_values: Set of output values that correspond to the discretisation.\n \"\"\"\n action: Callable[[bases.BasisDirection], bases.VectorInBasis]\n hidden_space: bases.VectorSpaceWithBasis\n output_values: List[float]\n\n\ndef _get_discretising_layer(input_value_set: Iterable[float],\n f: Callable[[float],\n float], hidden_name: bases.Name,\n one_direction: bases.BasisDirection,\n large_number: float) -> DiscretisingLayerMaterials:\n \"\"\"Creates a hidden layer that discretises the input of f(x) into a value set.\n\n The input is split up into a distinct region around each value in\n `input_value_set`:\n\n elements of value set: v0 | v1 | v2 | v3 | v4 | ...\n thresholds: t0 t1 t2 t3 t4\n\n The hidden layer has two activations per threshold:\n hidden_k_1 = ReLU(L * (x - threshold[k]) + 1)\n hidden_k_2 = ReLU(L * (x - threshold[k]))\n\n Note that hidden_k_1 - hidden_k_2 is:\n 1 if x >= threshold[k] + 1/L\n 0 if x <= threshold[k]\n between 0 and 1 if threshold[k] < x < threshold[k] + 1/L\n\n So as long as we choose L a big enough number, we have\n hidden_k_1 - hidden_k_2 = 1 if x >= threshold[k].\n i.e. 
we know in which region the input value is.\n\n Args:\n input_value_set: Set of discrete input values.\n f: Function to approximate.\n hidden_name: Name for hidden dimensions.\n one_direction: Auxiliary dimension that must contain 1 in the input.\n large_number: Large number L that determines accuracy of the computation.\n\n Returns:\n DiscretisingLayerMaterials containing all components for the layer.\n \"\"\"\n output_values, sorted_values = [], []\n for x in sorted(input_value_set):\n res = errors.ignoring_arithmetic_errors(f)(x)\n if res is not None:\n output_values.append(res)\n sorted_values.append(x)\n\n num_vals = len(sorted_values)\n value_thresholds = [\n (sorted_values[i] + sorted_values[i + 1]) / 2 for i in range(num_vals - 1)\n ]\n\n hidden_directions = [bases.BasisDirection(f\"{hidden_name}start\")]\n for k in range(1, num_vals):\n dir0 = bases.BasisDirection(hidden_name, (k, 0))\n dir1 = bases.BasisDirection(hidden_name, (k, 1))\n hidden_directions.extend([dir0, dir1])\n hidden_space = bases.VectorSpaceWithBasis(hidden_directions)\n\n def action(direction: bases.BasisDirection) -> bases.VectorInBasis:\n # hidden_k_0 = ReLU(L * (x - threshold[k]) + 1)\n # hidden_k_1 = ReLU(L * (x - threshold[k]))\n if direction == one_direction:\n hidden = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(f\"{hidden_name}start\"))\n else:\n hidden = hidden_space.null_vector()\n for k in range(1, num_vals):\n vec0 = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(hidden_name, (k, 0)))\n vec1 = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(hidden_name, (k, 1)))\n if direction == one_direction:\n hidden += (1 - large_number * value_thresholds[k - 1]) * vec0\n hidden -= large_number * value_thresholds[k - 1] * vec1\n else:\n hidden += large_number * vec0 + large_number * vec1\n return hidden\n\n return DiscretisingLayerMaterials(\n action=action, hidden_space=hidden_space, output_values=output_values)\n\n\ndef map_numerical_mlp(\n f: Callable[[float], float],\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n input_value_set: Iterable[float],\n one_space: bases.VectorSpaceWithBasis,\n large_number: float = 100,\n hidden_name: bases.Name = \"__hidden__\",\n) -> transformers.MLP:\n \"\"\"Returns an MLP that encodes any function of a single variable f(x).\n\n This is implemented by discretising the input according to input_value_set\n and defining thresholds that determine which part of the input range will\n is allocated to which value in input_value_set.\n\n elements of value set: v0 | v1 | v2 | v3 | v4 | ...\n thresholds: t0 t1 t2 t3 t4\n\n The MLP computes two hidden activations per threshold:\n hidden_k_0 = ReLU(L * (x - threshold[k]) + 1)\n hidden_k_1 = ReLU(L * (x - threshold[k]))\n\n Note that hidden_k_1 - hidden_k_2 is:\n 1 if x >= threshold[k] + 1/L\n 0 if x <= threshold[k]\n between 0 and 1 if threshold[k] < x < threshold[k] + 1/L\n\n So as long as we choose L a big enough number, we have\n hidden_k_0 - hidden_k_1 = 1 if x >= threshold[k].\n\n The MLP then computes the output as:\n output = f(input[0]) +\n sum((hidden_k_0 - hidden_k_1) * (f(input[k]) - f(input[k-1]))\n for all k=0,1,...)\n\n This sum will be (by a telescoping sums argument)\n f(input[0]) if x <= threshold[0]\n f(input[k]) if threshold[k-1] < x <= threshold[k] for some other k\n f(input[-1]) if x > threshold[-1]\n which approximates f() up to an accuracy given by input_value_set and L.\n\n Args:\n f: Function to approximate.\n 
input_space: 1-d vector space that encodes the input x.\n output_space: 1-d vector space to write the output to.\n input_value_set: Set of values the input can take.\n one_space: Auxiliary 1-d vector space that must contain 1 in the input.\n large_number: Large number L that determines accuracy of the computation.\n Note that too large values of L can lead to numerical issues, particularly\n during inference on GPU/TPU.\n hidden_name: Name for hidden dimensions.\n \"\"\"\n bases.ensure_dims(input_space, num_dims=1, name=\"input_space\")\n bases.ensure_dims(output_space, num_dims=1, name=\"output_space\")\n bases.ensure_dims(one_space, num_dims=1, name=\"one_space\")\n\n input_space = bases.join_vector_spaces(input_space, one_space)\n out_vec = output_space.vector_from_basis_direction(output_space.basis[0])\n\n discretising_layer = _get_discretising_layer(\n input_value_set=input_value_set,\n f=f,\n hidden_name=hidden_name,\n one_direction=one_space.basis[0],\n large_number=large_number)\n first_layer = vectorspace_fns.Linear.from_action(\n input_space, discretising_layer.hidden_space, discretising_layer.action)\n\n def second_layer_action(\n direction: bases.BasisDirection) -> bases.VectorInBasis:\n # output = sum(\n # (hidden_k_0 - hidden_k_1) * (f(input[k]) - f(input[k-1]))\n # for all k)\n if direction.name == f\"{hidden_name}start\":\n return discretising_layer.output_values[0] * out_vec\n k, i = direction.value\n # add hidden_k_0 and subtract hidden_k_1\n sign = {0: 1, 1: -1}[i]\n return sign * (discretising_layer.output_values[k] -\n discretising_layer.output_values[k - 1]) * out_vec\n\n second_layer = vectorspace_fns.Linear.from_action(\n discretising_layer.hidden_space, output_space, second_layer_action)\n\n return transformers.MLP(first_layer, second_layer)\n\n\ndef map_numerical_to_categorical_mlp(\n f: Callable[[float], float],\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n input_value_set: Iterable[float],\n one_space: bases.VectorSpaceWithBasis,\n large_number: float = 100,\n hidden_name: bases.Name = \"__hidden__\",\n) -> transformers.MLP:\n \"\"\"Returns an MLP to compute f(x) from a numerical to a categorical variable.\n\n Uses a set of possible output values, and rounds f(x) to the closest value\n in this set to create a categorical output variable.\n\n The output is discretised the same way as in `map_numerical_mlp`.\n\n Args:\n f: Function to approximate.\n input_space: 1-d vector space that encodes the input x.\n output_space: n-d vector space to write categorical output to. 
The output\n directions need to encode the possible output values.\n input_value_set: Set of values the input can take.\n one_space: Auxiliary 1-d space that must contain 1 in the input.\n large_number: Large number L that determines accuracy of the computation.\n hidden_name: Name for hidden dimensions.\n \"\"\"\n bases.ensure_dims(input_space, num_dims=1, name=\"input_space\")\n bases.ensure_dims(one_space, num_dims=1, name=\"one_space\")\n\n input_space = bases.join_vector_spaces(input_space, one_space)\n\n vec_by_out_val = dict()\n for d in output_space.basis:\n # TODO(b/255937603): Do a similar assert in other places where we expect\n # categorical basis directions to encode values.\n assert d.value is not None, (\"output directions need to encode \"\n \"possible output values\")\n vec_by_out_val[d.value] = output_space.vector_from_basis_direction(d)\n\n discretising_layer = _get_discretising_layer(\n input_value_set=input_value_set,\n f=f,\n hidden_name=hidden_name,\n one_direction=one_space.basis[0],\n large_number=large_number)\n\n assert set(discretising_layer.output_values).issubset(\n set(vec_by_out_val.keys()))\n\n first_layer = vectorspace_fns.Linear.from_action(\n input_space, discretising_layer.hidden_space, discretising_layer.action)\n\n def second_layer_action(\n direction: bases.BasisDirection) -> bases.VectorInBasis:\n \"\"\"Computes output value and returns corresponding output direction.\"\"\"", "metadata": {"task_id": "deepmind--tracr/77", "ground_truth": " if direction.name == f\"{hidden_name}start\":\n return vec_by_out_val[discretising_layer.output_values[0]]\n else:\n k, i = direction.value\n # add hidden_k_0 and subtract hidden_k_1\n sign = {0: 1, 1: -1}[i]\n out_k = discretising_layer.output_values[k]\n out_k_m_1 = discretising_layer.output_values[k - 1]\n return sign * (vec_by_out_val[out_k] - vec_by_out_val[out_k_m_1])\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "chamber", "numerical_mlp.py"], "context_start_lineno": 0, "lineno": 261, "function_name": "second_layer_action"}, "groundtruth": " if direction.name == f\"{hidden_name}start\":\n return vec_by_out_val[discretising_layer.output_values[0]]\n else:\n k, i = direction.value\n # add hidden_k_0 and subtract hidden_k_1\n sign = {0: 1, 1: -1}[i]\n out_k = discretising_layer.output_values[k]\n out_k_m_1 = discretising_layer.output_values[k - 1]\n return sign * (vec_by_out_val[out_k] - vec_by_out_val[out_k_m_1])\n"} +{"prompt": "\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"MLPs to compute arbitrary numerical functions by discretising.\"\"\"\n\nimport dataclasses\n\nfrom typing import Callable, Iterable, List\n\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\nfrom tracr.utils import errors\n\n\n@dataclasses.dataclass\nclass DiscretisingLayerMaterials:\n \"\"\"Provides components for a hidden layer that discretises the input.\n\n Attributes:\n action: 
Function acting on basis directions that defines the computation.\n hidden_space: Vector space of the hidden representation of the layer.\n output_values: Set of output values that correspond to the discretisation.\n \"\"\"\n action: Callable[[bases.BasisDirection], bases.VectorInBasis]\n hidden_space: bases.VectorSpaceWithBasis\n output_values: List[float]\n\n\ndef _get_discretising_layer(input_value_set: Iterable[float],\n f: Callable[[float],\n float], hidden_name: bases.Name,\n one_direction: bases.BasisDirection,\n large_number: float) -> DiscretisingLayerMaterials:\n \"\"\"Creates a hidden layer that discretises the input of f(x) into a value set.\n\n The input is split up into a distinct region around each value in\n `input_value_set`:\n\n elements of value set: v0 | v1 | v2 | v3 | v4 | ...\n thresholds: t0 t1 t2 t3 t4\n\n The hidden layer has two activations per threshold:\n hidden_k_1 = ReLU(L * (x - threshold[k]) + 1)\n hidden_k_2 = ReLU(L * (x - threshold[k]))\n\n Note that hidden_k_1 - hidden_k_2 is:\n 1 if x >= threshold[k] + 1/L\n 0 if x <= threshold[k]\n between 0 and 1 if threshold[k] < x < threshold[k] + 1/L\n\n So as long as we choose L a big enough number, we have\n hidden_k_1 - hidden_k_2 = 1 if x >= threshold[k].\n i.e. we know in which region the input value is.\n\n Args:\n input_value_set: Set of discrete input values.\n f: Function to approximate.\n hidden_name: Name for hidden dimensions.\n one_direction: Auxiliary dimension that must contain 1 in the input.\n large_number: Large number L that determines accuracy of the computation.\n\n Returns:\n DiscretisingLayerMaterials containing all components for the layer.\n \"\"\"\n output_values, sorted_values = [], []\n for x in sorted(input_value_set):\n res = errors.ignoring_arithmetic_errors(f)(x)\n if res is not None:\n output_values.append(res)\n sorted_values.append(x)\n\n num_vals = len(sorted_values)\n value_thresholds = [\n (sorted_values[i] + sorted_values[i + 1]) / 2 for i in range(num_vals - 1)\n ]\n\n hidden_directions = [bases.BasisDirection(f\"{hidden_name}start\")]\n for k in range(1, num_vals):\n dir0 = bases.BasisDirection(hidden_name, (k, 0))\n dir1 = bases.BasisDirection(hidden_name, (k, 1))\n hidden_directions.extend([dir0, dir1])\n hidden_space = bases.VectorSpaceWithBasis(hidden_directions)\n\n def action(direction: bases.BasisDirection) -> bases.VectorInBasis:\n # hidden_k_0 = ReLU(L * (x - threshold[k]) + 1)\n # hidden_k_1 = ReLU(L * (x - threshold[k]))\n if direction == one_direction:\n hidden = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(f\"{hidden_name}start\"))\n else:\n hidden = hidden_space.null_vector()\n for k in range(1, num_vals):\n vec0 = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(hidden_name, (k, 0)))\n vec1 = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(hidden_name, (k, 1)))\n if direction == one_direction:\n hidden += (1 - large_number * value_thresholds[k - 1]) * vec0\n hidden -= large_number * value_thresholds[k - 1] * vec1\n else:\n hidden += large_number * vec0 + large_number * vec1\n return hidden\n\n return DiscretisingLayerMaterials(\n action=action, hidden_space=hidden_space, output_values=output_values)\n\n\ndef map_numerical_mlp(\n f: Callable[[float], float],\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n input_value_set: Iterable[float],\n one_space: bases.VectorSpaceWithBasis,\n large_number: float = 100,\n hidden_name: bases.Name = \"__hidden__\",\n) -> 
transformers.MLP:\n \"\"\"Returns an MLP that encodes any function of a single variable f(x).\n\n This is implemented by discretising the input according to input_value_set\n and defining thresholds that determine which part of the input range will\n is allocated to which value in input_value_set.\n\n elements of value set: v0 | v1 | v2 | v3 | v4 | ...\n thresholds: t0 t1 t2 t3 t4\n\n The MLP computes two hidden activations per threshold:\n hidden_k_0 = ReLU(L * (x - threshold[k]) + 1)\n hidden_k_1 = ReLU(L * (x - threshold[k]))\n\n Note that hidden_k_1 - hidden_k_2 is:\n 1 if x >= threshold[k] + 1/L\n 0 if x <= threshold[k]\n between 0 and 1 if threshold[k] < x < threshold[k] + 1/L\n\n So as long as we choose L a big enough number, we have\n hidden_k_0 - hidden_k_1 = 1 if x >= threshold[k].\n\n The MLP then computes the output as:\n output = f(input[0]) +\n sum((hidden_k_0 - hidden_k_1) * (f(input[k]) - f(input[k-1]))\n for all k=0,1,...)\n\n This sum will be (by a telescoping sums argument)\n f(input[0]) if x <= threshold[0]\n f(input[k]) if threshold[k-1] < x <= threshold[k] for some other k\n f(input[-1]) if x > threshold[-1]\n which approximates f() up to an accuracy given by input_value_set and L.\n\n Args:\n f: Function to approximate.\n input_space: 1-d vector space that encodes the input x.\n output_space: 1-d vector space to write the output to.\n input_value_set: Set of values the input can take.\n one_space: Auxiliary 1-d vector space that must contain 1 in the input.\n large_number: Large number L that determines accuracy of the computation.\n Note that too large values of L can lead to numerical issues, particularly\n during inference on GPU/TPU.\n hidden_name: Name for hidden dimensions.\n \"\"\"\n bases.ensure_dims(input_space, num_dims=1, name=\"input_space\")\n bases.ensure_dims(output_space, num_dims=1, name=\"output_space\")\n bases.ensure_dims(one_space, num_dims=1, name=\"one_space\")\n\n input_space = bases.join_vector_spaces(input_space, one_space)\n out_vec = output_space.vector_from_basis_direction(output_space.basis[0])\n\n discretising_layer = _get_discretising_layer(\n input_value_set=input_value_set,\n f=f,\n hidden_name=hidden_name,\n one_direction=one_space.basis[0],\n large_number=large_number)\n first_layer = vectorspace_fns.Linear.from_action(\n input_space, discretising_layer.hidden_space, discretising_layer.action)\n\n def second_layer_action(\n direction: bases.BasisDirection) -> bases.VectorInBasis:\n # output = sum(\n # (hidden_k_0 - hidden_k_1) * (f(input[k]) - f(input[k-1]))\n # for all k)\n if direction.name == f\"{hidden_name}start\":\n return discretising_layer.output_values[0] * out_vec\n k, i = direction.value\n # add hidden_k_0 and subtract hidden_k_1\n sign = {0: 1, 1: -1}[i]\n return sign * (discretising_layer.output_values[k] -\n discretising_layer.output_values[k - 1]) * out_vec\n\n second_layer = vectorspace_fns.Linear.from_action(\n discretising_layer.hidden_space, output_space, second_layer_action)\n\n return transformers.MLP(first_layer, second_layer)\n\n\ndef map_numerical_to_categorical_mlp(\n f: Callable[[float], float],\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n input_value_set: Iterable[float],\n one_space: bases.VectorSpaceWithBasis,\n large_number: float = 100,\n hidden_name: bases.Name = \"__hidden__\",\n) -> transformers.MLP:\n \"\"\"Returns an MLP to compute f(x) from a numerical to a categorical variable.\n\n Uses a set of possible output values, and rounds f(x) to the closest 
value\n in this set to create a categorical output variable.\n\n The output is discretised the same way as in `map_numerical_mlp`.\n\n Args:\n f: Function to approximate.\n input_space: 1-d vector space that encodes the input x.\n output_space: n-d vector space to write categorical output to. The output\n directions need to encode the possible output values.\n input_value_set: Set of values the input can take.\n one_space: Auxiliary 1-d space that must contain 1 in the input.\n large_number: Large number L that determines accuracy of the computation.\n hidden_name: Name for hidden dimensions.\n \"\"\"\n bases.ensure_dims(input_space, num_dims=1, name=\"input_space\")\n bases.ensure_dims(one_space, num_dims=1, name=\"one_space\")\n\n input_space = bases.join_vector_spaces(input_space, one_space)\n\n vec_by_out_val = dict()\n for d in output_space.basis:\n # TODO(b/255937603): Do a similar assert in other places where we expect\n # categorical basis directions to encode values.\n assert d.value is not None, (\"output directions need to encode \"\n \"possible output values\")\n vec_by_out_val[d.value] = output_space.vector_from_basis_direction(d)\n\n discretising_layer = _get_discretising_layer(\n input_value_set=input_value_set,\n f=f,\n hidden_name=hidden_name,\n one_direction=one_space.basis[0],\n large_number=large_number)\n\n assert set(discretising_layer.output_values).issubset(\n set(vec_by_out_val.keys()))\n\n first_layer = vectorspace_fns.Linear.from_action(\n input_space, discretising_layer.hidden_space, discretising_layer.action)\n\n def second_layer_action(\n direction: bases.BasisDirection) -> bases.VectorInBasis:\n \"\"\"Computes output value and returns corresponding output direction.\"\"\"\n if direction.name == f\"{hidden_name}start\":\n return vec_by_out_val[discretising_layer.output_values[0]]\n else:\n k, i = direction.value\n # add hidden_k_0 and subtract hidden_k_1\n sign = {0: 1, 1: -1}[i]\n out_k = discretising_layer.output_values[k]\n out_k_m_1 = discretising_layer.output_values[k - 1]\n return sign * (vec_by_out_val[out_k] - vec_by_out_val[out_k_m_1])\n\n second_layer = vectorspace_fns.Linear.from_action(\n discretising_layer.hidden_space, output_space, second_layer_action)\n\n return transformers.MLP(first_layer, second_layer)\n\n\ndef linear_sequence_map_numerical_mlp(\n input1_basis_direction: bases.BasisDirection,\n input2_basis_direction: bases.BasisDirection,\n output_basis_direction: bases.BasisDirection,\n input1_factor: float,\n input2_factor: float,\n hidden_name: bases.Name = \"__hidden__\",\n) -> transformers.MLP:\n \"\"\"Returns an MLP that encodes a linear function f(x, y) = a*x + b*y.\n\n Args:\n input1_basis_direction: Basis direction that encodes the input x.\n input2_basis_direction: Basis direction that encodes the input y.\n output_basis_direction: Basis direction to write the output to.\n input1_factor: Linear factor a for input x.\n input2_factor: Linear factor a for input y.\n hidden_name: Name for hidden dimensions.\n \"\"\"\n input_space = bases.VectorSpaceWithBasis(\n [input1_basis_direction, input2_basis_direction])\n output_space = bases.VectorSpaceWithBasis([output_basis_direction])\n out_vec = output_space.vector_from_basis_direction(output_basis_direction)\n\n hidden_directions = [\n bases.BasisDirection(f\"{hidden_name}x\", 1),\n bases.BasisDirection(f\"{hidden_name}x\", -1),\n bases.BasisDirection(f\"{hidden_name}y\", 1),\n bases.BasisDirection(f\"{hidden_name}y\", -1)\n ]\n hidden_space = 
bases.VectorSpaceWithBasis(hidden_directions)\n x_pos_vec, x_neg_vec, y_pos_vec, y_neg_vec = (\n hidden_space.vector_from_basis_direction(d) for d in hidden_directions)\n\n def first_layer_action(\n direction: bases.BasisDirection) -> bases.VectorInBasis:", "metadata": {"task_id": "deepmind--tracr/78", "ground_truth": " output = hidden_space.null_vector()\n if direction == input1_basis_direction:\n output += x_pos_vec - x_neg_vec\n if direction == input2_basis_direction:\n output += y_pos_vec - y_neg_vec\n return output\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "chamber", "numerical_mlp.py"], "context_start_lineno": 1, "lineno": 312, "function_name": "first_layer_action"}, "groundtruth": " output = hidden_space.null_vector()\n if direction == input1_basis_direction:\n output += x_pos_vec - x_neg_vec\n if direction == input2_basis_direction:\n output += y_pos_vec - y_neg_vec\n return output\n"} +{"prompt": "\n# ==============================================================================\n\"\"\"MLPs to compute arbitrary numerical functions by discretising.\"\"\"\n\nimport dataclasses\n\nfrom typing import Callable, Iterable, List\n\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\nfrom tracr.utils import errors\n\n\n@dataclasses.dataclass\nclass DiscretisingLayerMaterials:\n \"\"\"Provides components for a hidden layer that discretises the input.\n\n Attributes:\n action: Function acting on basis directions that defines the computation.\n hidden_space: Vector space of the hidden representation of the layer.\n output_values: Set of output values that correspond to the discretisation.\n \"\"\"\n action: Callable[[bases.BasisDirection], bases.VectorInBasis]\n hidden_space: bases.VectorSpaceWithBasis\n output_values: List[float]\n\n\ndef _get_discretising_layer(input_value_set: Iterable[float],\n f: Callable[[float],\n float], hidden_name: bases.Name,\n one_direction: bases.BasisDirection,\n large_number: float) -> DiscretisingLayerMaterials:\n \"\"\"Creates a hidden layer that discretises the input of f(x) into a value set.\n\n The input is split up into a distinct region around each value in\n `input_value_set`:\n\n elements of value set: v0 | v1 | v2 | v3 | v4 | ...\n thresholds: t0 t1 t2 t3 t4\n\n The hidden layer has two activations per threshold:\n hidden_k_1 = ReLU(L * (x - threshold[k]) + 1)\n hidden_k_2 = ReLU(L * (x - threshold[k]))\n\n Note that hidden_k_1 - hidden_k_2 is:\n 1 if x >= threshold[k] + 1/L\n 0 if x <= threshold[k]\n between 0 and 1 if threshold[k] < x < threshold[k] + 1/L\n\n So as long as we choose L a big enough number, we have\n hidden_k_1 - hidden_k_2 = 1 if x >= threshold[k].\n i.e. 
we know in which region the input value is.\n\n Args:\n input_value_set: Set of discrete input values.\n f: Function to approximate.\n hidden_name: Name for hidden dimensions.\n one_direction: Auxiliary dimension that must contain 1 in the input.\n large_number: Large number L that determines accuracy of the computation.\n\n Returns:\n DiscretisingLayerMaterials containing all components for the layer.\n \"\"\"\n output_values, sorted_values = [], []\n for x in sorted(input_value_set):\n res = errors.ignoring_arithmetic_errors(f)(x)\n if res is not None:\n output_values.append(res)\n sorted_values.append(x)\n\n num_vals = len(sorted_values)\n value_thresholds = [\n (sorted_values[i] + sorted_values[i + 1]) / 2 for i in range(num_vals - 1)\n ]\n\n hidden_directions = [bases.BasisDirection(f\"{hidden_name}start\")]\n for k in range(1, num_vals):\n dir0 = bases.BasisDirection(hidden_name, (k, 0))\n dir1 = bases.BasisDirection(hidden_name, (k, 1))\n hidden_directions.extend([dir0, dir1])\n hidden_space = bases.VectorSpaceWithBasis(hidden_directions)\n\n def action(direction: bases.BasisDirection) -> bases.VectorInBasis:\n # hidden_k_0 = ReLU(L * (x - threshold[k]) + 1)\n # hidden_k_1 = ReLU(L * (x - threshold[k]))\n if direction == one_direction:\n hidden = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(f\"{hidden_name}start\"))\n else:\n hidden = hidden_space.null_vector()\n for k in range(1, num_vals):\n vec0 = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(hidden_name, (k, 0)))\n vec1 = hidden_space.vector_from_basis_direction(\n bases.BasisDirection(hidden_name, (k, 1)))\n if direction == one_direction:\n hidden += (1 - large_number * value_thresholds[k - 1]) * vec0\n hidden -= large_number * value_thresholds[k - 1] * vec1\n else:\n hidden += large_number * vec0 + large_number * vec1\n return hidden\n\n return DiscretisingLayerMaterials(\n action=action, hidden_space=hidden_space, output_values=output_values)\n\n\ndef map_numerical_mlp(\n f: Callable[[float], float],\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n input_value_set: Iterable[float],\n one_space: bases.VectorSpaceWithBasis,\n large_number: float = 100,\n hidden_name: bases.Name = \"__hidden__\",\n) -> transformers.MLP:\n \"\"\"Returns an MLP that encodes any function of a single variable f(x).\n\n This is implemented by discretising the input according to input_value_set\n and defining thresholds that determine which part of the input range will\n is allocated to which value in input_value_set.\n\n elements of value set: v0 | v1 | v2 | v3 | v4 | ...\n thresholds: t0 t1 t2 t3 t4\n\n The MLP computes two hidden activations per threshold:\n hidden_k_0 = ReLU(L * (x - threshold[k]) + 1)\n hidden_k_1 = ReLU(L * (x - threshold[k]))\n\n Note that hidden_k_1 - hidden_k_2 is:\n 1 if x >= threshold[k] + 1/L\n 0 if x <= threshold[k]\n between 0 and 1 if threshold[k] < x < threshold[k] + 1/L\n\n So as long as we choose L a big enough number, we have\n hidden_k_0 - hidden_k_1 = 1 if x >= threshold[k].\n\n The MLP then computes the output as:\n output = f(input[0]) +\n sum((hidden_k_0 - hidden_k_1) * (f(input[k]) - f(input[k-1]))\n for all k=0,1,...)\n\n This sum will be (by a telescoping sums argument)\n f(input[0]) if x <= threshold[0]\n f(input[k]) if threshold[k-1] < x <= threshold[k] for some other k\n f(input[-1]) if x > threshold[-1]\n which approximates f() up to an accuracy given by input_value_set and L.\n\n Args:\n f: Function to approximate.\n 
input_space: 1-d vector space that encodes the input x.\n output_space: 1-d vector space to write the output to.\n input_value_set: Set of values the input can take.\n one_space: Auxiliary 1-d vector space that must contain 1 in the input.\n large_number: Large number L that determines accuracy of the computation.\n Note that too large values of L can lead to numerical issues, particularly\n during inference on GPU/TPU.\n hidden_name: Name for hidden dimensions.\n \"\"\"\n bases.ensure_dims(input_space, num_dims=1, name=\"input_space\")\n bases.ensure_dims(output_space, num_dims=1, name=\"output_space\")\n bases.ensure_dims(one_space, num_dims=1, name=\"one_space\")\n\n input_space = bases.join_vector_spaces(input_space, one_space)\n out_vec = output_space.vector_from_basis_direction(output_space.basis[0])\n\n discretising_layer = _get_discretising_layer(\n input_value_set=input_value_set,\n f=f,\n hidden_name=hidden_name,\n one_direction=one_space.basis[0],\n large_number=large_number)\n first_layer = vectorspace_fns.Linear.from_action(\n input_space, discretising_layer.hidden_space, discretising_layer.action)\n\n def second_layer_action(\n direction: bases.BasisDirection) -> bases.VectorInBasis:\n # output = sum(\n # (hidden_k_0 - hidden_k_1) * (f(input[k]) - f(input[k-1]))\n # for all k)\n if direction.name == f\"{hidden_name}start\":\n return discretising_layer.output_values[0] * out_vec\n k, i = direction.value\n # add hidden_k_0 and subtract hidden_k_1\n sign = {0: 1, 1: -1}[i]\n return sign * (discretising_layer.output_values[k] -\n discretising_layer.output_values[k - 1]) * out_vec\n\n second_layer = vectorspace_fns.Linear.from_action(\n discretising_layer.hidden_space, output_space, second_layer_action)\n\n return transformers.MLP(first_layer, second_layer)\n\n\ndef map_numerical_to_categorical_mlp(\n f: Callable[[float], float],\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n input_value_set: Iterable[float],\n one_space: bases.VectorSpaceWithBasis,\n large_number: float = 100,\n hidden_name: bases.Name = \"__hidden__\",\n) -> transformers.MLP:\n \"\"\"Returns an MLP to compute f(x) from a numerical to a categorical variable.\n\n Uses a set of possible output values, and rounds f(x) to the closest value\n in this set to create a categorical output variable.\n\n The output is discretised the same way as in `map_numerical_mlp`.\n\n Args:\n f: Function to approximate.\n input_space: 1-d vector space that encodes the input x.\n output_space: n-d vector space to write categorical output to. 
The output\n directions need to encode the possible output values.\n input_value_set: Set of values the input can take.\n one_space: Auxiliary 1-d space that must contain 1 in the input.\n large_number: Large number L that determines accuracy of the computation.\n hidden_name: Name for hidden dimensions.\n \"\"\"\n bases.ensure_dims(input_space, num_dims=1, name=\"input_space\")\n bases.ensure_dims(one_space, num_dims=1, name=\"one_space\")\n\n input_space = bases.join_vector_spaces(input_space, one_space)\n\n vec_by_out_val = dict()\n for d in output_space.basis:\n # TODO(b/255937603): Do a similar assert in other places where we expect\n # categorical basis directions to encode values.\n assert d.value is not None, (\"output directions need to encode \"\n \"possible output values\")\n vec_by_out_val[d.value] = output_space.vector_from_basis_direction(d)\n\n discretising_layer = _get_discretising_layer(\n input_value_set=input_value_set,\n f=f,\n hidden_name=hidden_name,\n one_direction=one_space.basis[0],\n large_number=large_number)\n\n assert set(discretising_layer.output_values).issubset(\n set(vec_by_out_val.keys()))\n\n first_layer = vectorspace_fns.Linear.from_action(\n input_space, discretising_layer.hidden_space, discretising_layer.action)\n\n def second_layer_action(\n direction: bases.BasisDirection) -> bases.VectorInBasis:\n \"\"\"Computes output value and returns corresponding output direction.\"\"\"\n if direction.name == f\"{hidden_name}start\":\n return vec_by_out_val[discretising_layer.output_values[0]]\n else:\n k, i = direction.value\n # add hidden_k_0 and subtract hidden_k_1\n sign = {0: 1, 1: -1}[i]\n out_k = discretising_layer.output_values[k]\n out_k_m_1 = discretising_layer.output_values[k - 1]\n return sign * (vec_by_out_val[out_k] - vec_by_out_val[out_k_m_1])\n\n second_layer = vectorspace_fns.Linear.from_action(\n discretising_layer.hidden_space, output_space, second_layer_action)\n\n return transformers.MLP(first_layer, second_layer)\n\n\ndef linear_sequence_map_numerical_mlp(\n input1_basis_direction: bases.BasisDirection,\n input2_basis_direction: bases.BasisDirection,\n output_basis_direction: bases.BasisDirection,\n input1_factor: float,\n input2_factor: float,\n hidden_name: bases.Name = \"__hidden__\",\n) -> transformers.MLP:\n \"\"\"Returns an MLP that encodes a linear function f(x, y) = a*x + b*y.\n\n Args:\n input1_basis_direction: Basis direction that encodes the input x.\n input2_basis_direction: Basis direction that encodes the input y.\n output_basis_direction: Basis direction to write the output to.\n input1_factor: Linear factor a for input x.\n input2_factor: Linear factor a for input y.\n hidden_name: Name for hidden dimensions.\n \"\"\"\n input_space = bases.VectorSpaceWithBasis(\n [input1_basis_direction, input2_basis_direction])\n output_space = bases.VectorSpaceWithBasis([output_basis_direction])\n out_vec = output_space.vector_from_basis_direction(output_basis_direction)\n\n hidden_directions = [\n bases.BasisDirection(f\"{hidden_name}x\", 1),\n bases.BasisDirection(f\"{hidden_name}x\", -1),\n bases.BasisDirection(f\"{hidden_name}y\", 1),\n bases.BasisDirection(f\"{hidden_name}y\", -1)\n ]\n hidden_space = bases.VectorSpaceWithBasis(hidden_directions)\n x_pos_vec, x_neg_vec, y_pos_vec, y_neg_vec = (\n hidden_space.vector_from_basis_direction(d) for d in hidden_directions)\n\n def first_layer_action(\n direction: bases.BasisDirection) -> bases.VectorInBasis:\n output = hidden_space.null_vector()\n if direction == input1_basis_direction:\n 
output += x_pos_vec - x_neg_vec\n if direction == input2_basis_direction:\n output += y_pos_vec - y_neg_vec\n return output\n\n first_layer = vectorspace_fns.Linear.from_action(input_space, hidden_space,\n first_layer_action)\n\n def second_layer_action(\n direction: bases.BasisDirection) -> bases.VectorInBasis:", "metadata": {"task_id": "deepmind--tracr/79", "ground_truth": " if direction.name == f\"{hidden_name}x\":\n return input1_factor * direction.value * out_vec\n if direction.name == f\"{hidden_name}y\":\n return input2_factor * direction.value * out_vec\n return output_space.null_vector()\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "chamber", "numerical_mlp.py"], "context_start_lineno": 12, "lineno": 324, "function_name": "second_layer_action"}, "groundtruth": " if direction.name == f\"{hidden_name}x\":\n return input1_factor * direction.value * out_vec\n if direction.name == f\"{hidden_name}y\":\n return input2_factor * direction.value * out_vec\n return output_space.null_vector()\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for chamber.categorical_attn.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport numpy as np\nfrom tracr.craft import bases\nfrom tracr.craft import tests_common\nfrom tracr.craft.chamber import categorical_attn\n\n\nclass CategoricalAttnTest(tests_common.VectorFnTestCase):\n\n @parameterized.parameters([\n dict(causal=False, input_seq=[1, 2, 3, 4, 5], result_seq=[3, 3, 3, 3, 3]),\n dict(\n causal=True,\n input_seq=[1, 2, 3, 4, 5],\n result_seq=[1, 1.5, 2, 2.5, 3]),\n dict(causal=False, input_seq=[10], result_seq=[10]),\n dict(causal=True, input_seq=[10], result_seq=[10]),\n dict(causal=False, input_seq=[-1, 0, 1], result_seq=[0, 0, 0]),\n dict(causal=True, input_seq=[-1, 0, 1], result_seq=[-1, -0.5, 0]),\n ])\n def test_categorical_attn_can_implement_select_all(self, causal, input_seq,\n result_seq):\n vocab = range(-20, 20)\n input_space = bases.VectorSpaceWithBasis.from_values(\"input\", vocab)\n\n output_dir = bases.BasisDirection(\"output\")\n output_space = bases.VectorSpaceWithBasis([output_dir])\n output_vec = output_space.vector_from_basis_direction(output_dir)\n\n bos_dir = bases.BasisDirection(\"bos_dimension\")\n bos_space = bases.VectorSpaceWithBasis([bos_dir])\n\n one_dir = bases.BasisDirection(\"one\")\n one_space = bases.VectorSpaceWithBasis([one_dir])\n\n value_dir = bases.BasisDirection(\"value\")\n value_space = bases.VectorSpaceWithBasis([value_dir])\n\n input_space = bases.join_vector_spaces(input_space, bos_space, one_space)\n value_space = bases.join_vector_spaces(value_space, bos_space)\n residual_space = bases.join_vector_spaces(input_space, value_space,\n output_space)\n one_vec = residual_space.vector_from_basis_direction(one_dir)\n bos_vec = residual_space.vector_from_basis_direction(bos_dir)\n 
value_vec = residual_space.vector_from_basis_direction(value_dir)\n\n attn = categorical_attn.categorical_attn(\n key_space=input_space,\n query_space=input_space,\n value_space=value_space,\n output_space=output_space,\n bos_space=bos_space,\n one_space=one_space,\n attn_fn=lambda x, y: True,\n causal=causal)\n\n test_inputs = [bos_vec + one_vec]\n for x in input_seq:\n test_inputs.append(\n residual_space.vector_from_basis_direction(\n bases.BasisDirection(\"input\", x)) + x * value_vec)\n test_inputs = bases.VectorInBasis.stack(test_inputs)\n\n # Expect the average of all (previous) tokens\n expected_results = [x * output_vec for x in result_seq]\n expected_results = bases.VectorInBasis.stack(expected_results)\n\n test_outputs = attn.apply(test_inputs).project(output_space)\n\n self.assertVectorAllClose(\n tests_common.strip_bos_token(test_outputs), expected_results)\n\n @parameterized.parameters([\n dict(causal=False, input_seq=[1, 2, 3, 4, 5], default=0),\n dict(causal=True, input_seq=[1, 2, 3, 4, 5], default=1),\n dict(causal=False, input_seq=[10], default=2),\n dict(causal=True, input_seq=[10], default=-3),\n dict(causal=False, input_seq=[-1, 0, 1], default=-2),\n dict(causal=True, input_seq=[-1, 0, 1], default=-1),\n ])\n def test_categorical_attn_can_implement_select_none(self, causal, input_seq,\n default):\n vocab = range(-20, 20)\n input_space = bases.VectorSpaceWithBasis.from_values(\"input\", vocab)\n\n output_dir = bases.BasisDirection(\"output\")\n output_space = bases.VectorSpaceWithBasis([output_dir])\n default_vec = default * output_space.vector_from_basis_direction(output_dir)\n\n bos_dir = bases.BasisDirection(\"bos_dimension\")\n bos_space = bases.VectorSpaceWithBasis([bos_dir])\n\n one_dir = bases.BasisDirection(\"one\")\n one_space = bases.VectorSpaceWithBasis([one_dir])\n\n value_dir = bases.BasisDirection(\"value\")\n value_space = bases.VectorSpaceWithBasis([value_dir])\n\n input_space = bases.join_vector_spaces(input_space, bos_space, one_space)\n value_space = bases.join_vector_spaces(value_space, bos_space)\n residual_space = bases.join_vector_spaces(input_space, value_space,\n output_space)\n value_vec = residual_space.vector_from_basis_direction(value_dir)\n bos_vec = residual_space.vector_from_basis_direction(bos_dir)\n one_vec = residual_space.vector_from_basis_direction(one_dir)\n\n attn = categorical_attn.categorical_attn(\n key_space=input_space,\n query_space=input_space,\n value_space=value_space,\n output_space=output_space,\n bos_space=bos_space,\n one_space=one_space,\n attn_fn=lambda x, y: False,\n default_output=default_vec,\n causal=causal,\n always_attend_to_bos=False,\n use_bos_for_default_output=True)\n\n def make_input(x):", "metadata": {"task_id": "deepmind--tracr/80", "ground_truth": " return (one_vec + x * value_vec +\n residual_space.vector_from_basis_direction(\n bases.BasisDirection(\"input\", x)))\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "chamber", "categorical_attn_test.py"], "context_start_lineno": 0, "lineno": 137, "function_name": "make_input"}, "groundtruth": " return (one_vec + x * value_vec +\n residual_space.vector_from_basis_direction(\n bases.BasisDirection(\"input\", x)))\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"MLP to compute basic linear functions of one-hot encoded integers.\"\"\"\n\nfrom typing import Callable\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\n\n_ONE_SPACE = bases.VectorSpaceWithBasis.from_names([\"one\"])\n\n\ndef map_categorical_mlp(\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n operation: Callable[[bases.BasisDirection], bases.BasisDirection],\n) -> transformers.MLP:\n \"\"\"Returns an MLP that encodes any categorical function of a single variable f(x).\n\n The hidden layer is the identity and output combines this with a lookup table\n output_k = sum(f(i)*input_i for all i in input space)\n\n Args:\n input_space: space containing the input x.\n output_space: space containing possible outputs.\n operation: A function operating on basis directions.\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/81", "ground_truth": " def operation_fn(direction):\n if direction in input_space:\n output_direction = operation(direction)\n if output_direction in output_space:\n return output_space.vector_from_basis_direction(output_direction)\n return output_space.null_vector()\n\n first_layer = vectorspace_fns.Linear.from_action(input_space, output_space,\n operation_fn)\n\n second_layer = vectorspace_fns.project(output_space, output_space)\n\n return transformers.MLP(first_layer, second_layer)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "chamber", "categorical_mlp.py"], "context_start_lineno": 0, "lineno": 43, "function_name": "map_categorical_mlp"}, "groundtruth": " def operation_fn(direction):\n if direction in input_space:\n output_direction = operation(direction)\n if output_direction in output_space:\n return output_space.vector_from_basis_direction(output_direction)\n return output_space.null_vector()\n\n first_layer = vectorspace_fns.Linear.from_action(input_space, output_space,\n operation_fn)\n\n second_layer = vectorspace_fns.project(output_space, output_space)\n\n return transformers.MLP(first_layer, second_layer)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"MLP to compute basic linear functions of one-hot encoded integers.\"\"\"\n\nfrom typing import Callable\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\n\n_ONE_SPACE = bases.VectorSpaceWithBasis.from_names([\"one\"])\n\n\ndef map_categorical_mlp(\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n operation: Callable[[bases.BasisDirection], bases.BasisDirection],\n) -> transformers.MLP:\n \"\"\"Returns an MLP that encodes any categorical function of a single variable f(x).\n\n The hidden layer is the identity and output combines this with a lookup table\n output_k = sum(f(i)*input_i for all i in input space)\n\n Args:\n input_space: space containing the input x.\n output_space: space containing possible outputs.\n operation: A function operating on basis directions.\n \"\"\"\n\n def operation_fn(direction):", "metadata": {"task_id": "deepmind--tracr/82", "ground_truth": " if direction in input_space:\n output_direction = operation(direction)\n if output_direction in output_space:\n return output_space.vector_from_basis_direction(output_direction)\n return output_space.null_vector()\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "chamber", "categorical_mlp.py"], "context_start_lineno": 0, "lineno": 44, "function_name": "operation_fn"}, "groundtruth": " if direction in input_space:\n output_direction = operation(direction)\n if output_direction in output_space:\n return output_space.vector_from_basis_direction(output_direction)\n return output_space.null_vector()\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"MLP to compute basic linear functions of one-hot encoded integers.\"\"\"\n\nfrom typing import Callable\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\n\n_ONE_SPACE = bases.VectorSpaceWithBasis.from_names([\"one\"])\n\n\ndef map_categorical_mlp(\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n operation: Callable[[bases.BasisDirection], bases.BasisDirection],\n) -> transformers.MLP:\n \"\"\"Returns an MLP that encodes any categorical function of a single variable f(x).\n\n The hidden layer is the identity and output combines this with a lookup table\n output_k = sum(f(i)*input_i for all i in input space)\n\n Args:\n input_space: space containing the input x.\n output_space: space containing possible outputs.\n operation: A function operating on basis directions.\n \"\"\"\n\n def operation_fn(direction):\n if direction in input_space:\n output_direction = operation(direction)\n if output_direction in output_space:\n return output_space.vector_from_basis_direction(output_direction)\n return output_space.null_vector()\n\n first_layer = vectorspace_fns.Linear.from_action(input_space, output_space,\n operation_fn)\n\n second_layer = vectorspace_fns.project(output_space, output_space)\n\n return transformers.MLP(first_layer, second_layer)\n\n\ndef map_categorical_to_numerical_mlp(\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n operation: Callable[[bases.Value], float],\n) -> transformers.MLP:\n \"\"\"Returns an MLP to compute f(x) from a categorical to a numerical variable.\n\n The hidden layer is the identity and output combines this with a lookup table\n output = sum(f(i)*input_i for all i in input space)\n\n Args:\n input_space: Vector space containing the input x.\n output_space: Vector space to write the numerical output to.\n operation: A function operating on basis directions.\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/83", "ground_truth": " bases.ensure_dims(output_space, num_dims=1, name=\"output_space\")\n out_vec = output_space.vector_from_basis_direction(output_space.basis[0])\n\n def operation_fn(direction):\n if direction in input_space:\n return operation(direction.value) * out_vec\n return output_space.null_vector()\n\n first_layer = vectorspace_fns.Linear.from_action(input_space, output_space,\n operation_fn)\n\n second_layer = vectorspace_fns.project(output_space, output_space)\n\n return transformers.MLP(first_layer, second_layer)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "chamber", "categorical_mlp.py"], "context_start_lineno": 0, "lineno": 73, "function_name": "map_categorical_to_numerical_mlp"}, "groundtruth": " bases.ensure_dims(output_space, num_dims=1, name=\"output_space\")\n out_vec = 
output_space.vector_from_basis_direction(output_space.basis[0])\n\n def operation_fn(direction):\n if direction in input_space:\n return operation(direction.value) * out_vec\n return output_space.null_vector()\n\n first_layer = vectorspace_fns.Linear.from_action(input_space, output_space,\n operation_fn)\n\n second_layer = vectorspace_fns.project(output_space, output_space)\n\n return transformers.MLP(first_layer, second_layer)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"MLP to compute basic linear functions of one-hot encoded integers.\"\"\"\n\nfrom typing import Callable\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\n\n_ONE_SPACE = bases.VectorSpaceWithBasis.from_names([\"one\"])\n\n\ndef map_categorical_mlp(\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n operation: Callable[[bases.BasisDirection], bases.BasisDirection],\n) -> transformers.MLP:\n \"\"\"Returns an MLP that encodes any categorical function of a single variable f(x).\n\n The hidden layer is the identity and output combines this with a lookup table\n output_k = sum(f(i)*input_i for all i in input space)\n\n Args:\n input_space: space containing the input x.\n output_space: space containing possible outputs.\n operation: A function operating on basis directions.\n \"\"\"\n\n def operation_fn(direction):\n if direction in input_space:\n output_direction = operation(direction)\n if output_direction in output_space:\n return output_space.vector_from_basis_direction(output_direction)\n return output_space.null_vector()\n\n first_layer = vectorspace_fns.Linear.from_action(input_space, output_space,\n operation_fn)\n\n second_layer = vectorspace_fns.project(output_space, output_space)\n\n return transformers.MLP(first_layer, second_layer)\n\n\ndef map_categorical_to_numerical_mlp(\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n operation: Callable[[bases.Value], float],\n) -> transformers.MLP:\n \"\"\"Returns an MLP to compute f(x) from a categorical to a numerical variable.\n\n The hidden layer is the identity and output combines this with a lookup table\n output = sum(f(i)*input_i for all i in input space)\n\n Args:\n input_space: Vector space containing the input x.\n output_space: Vector space to write the numerical output to.\n operation: A function operating on basis directions.\n \"\"\"\n bases.ensure_dims(output_space, num_dims=1, name=\"output_space\")\n out_vec = output_space.vector_from_basis_direction(output_space.basis[0])\n\n def operation_fn(direction):", "metadata": {"task_id": "deepmind--tracr/84", "ground_truth": " if direction in input_space:\n return operation(direction.value) * out_vec\n return output_space.null_vector()\n", "fpath_tuple": ["deepmind_tracr", "tracr", 
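`map_categorical_to_numerical_mlp` reads a scalar off a one-hot input in the same way: output = sum(f(i) * input_i), written into a one-dimensional output space. Sketched with plain NumPy (the value table is made up for illustration):

import numpy as np

values = [0, 1, 2, 3]        # illustrative categorical vocabulary
def f(v):                    # the numerical function being encoded
    return float(v * v)

# A single output column whose i-th entry is f(values[i]).
w = np.array([f(v) for v in values])

x = np.eye(len(values))[3]   # one-hot input for the value 3
print(x @ w)                 # 9.0 == f(3)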
"craft", "chamber", "categorical_mlp.py"], "context_start_lineno": 0, "lineno": 77, "function_name": "operation_fn"}, "groundtruth": " if direction in input_space:\n return operation(direction.value) * out_vec\n return output_space.null_vector()\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"MLP to compute basic linear functions of one-hot encoded integers.\"\"\"\n\nfrom typing import Callable\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\n\n_ONE_SPACE = bases.VectorSpaceWithBasis.from_names([\"one\"])\n\n\ndef map_categorical_mlp(\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n operation: Callable[[bases.BasisDirection], bases.BasisDirection],\n) -> transformers.MLP:\n \"\"\"Returns an MLP that encodes any categorical function of a single variable f(x).\n\n The hidden layer is the identity and output combines this with a lookup table\n output_k = sum(f(i)*input_i for all i in input space)\n\n Args:\n input_space: space containing the input x.\n output_space: space containing possible outputs.\n operation: A function operating on basis directions.\n \"\"\"\n\n def operation_fn(direction):\n if direction in input_space:\n output_direction = operation(direction)\n if output_direction in output_space:\n return output_space.vector_from_basis_direction(output_direction)\n return output_space.null_vector()\n\n first_layer = vectorspace_fns.Linear.from_action(input_space, output_space,\n operation_fn)\n\n second_layer = vectorspace_fns.project(output_space, output_space)\n\n return transformers.MLP(first_layer, second_layer)\n\n\ndef map_categorical_to_numerical_mlp(\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n operation: Callable[[bases.Value], float],\n) -> transformers.MLP:\n \"\"\"Returns an MLP to compute f(x) from a categorical to a numerical variable.\n\n The hidden layer is the identity and output combines this with a lookup table\n output = sum(f(i)*input_i for all i in input space)\n\n Args:\n input_space: Vector space containing the input x.\n output_space: Vector space to write the numerical output to.\n operation: A function operating on basis directions.\n \"\"\"\n bases.ensure_dims(output_space, num_dims=1, name=\"output_space\")\n out_vec = output_space.vector_from_basis_direction(output_space.basis[0])\n\n def operation_fn(direction):\n if direction in input_space:\n return operation(direction.value) * out_vec\n return output_space.null_vector()\n\n first_layer = vectorspace_fns.Linear.from_action(input_space, output_space,\n operation_fn)\n\n second_layer = vectorspace_fns.project(output_space, output_space)\n\n return transformers.MLP(first_layer, second_layer)\n\n\ndef sequence_map_categorical_mlp(\n input1_space: 
bases.VectorSpaceWithBasis,\n input2_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n operation: Callable[[bases.BasisDirection, bases.BasisDirection],\n bases.BasisDirection],\n one_space: bases.VectorSpaceWithBasis = _ONE_SPACE,\n hidden_name: bases.Name = \"__hidden__\",\n) -> transformers.MLP:\n \"\"\"Returns an MLP that encodes a categorical function of two variables f(x, y).\n\n The hidden layer of the MLP computes the logical and of all input directions\n hidden_i_j = ReLU(x_i+x_j-1)\n\n And the output combines this with a lookup table\n output_k = sum(f(i, j)*hidden_i_j for all i,j in input space)\n\n Args:\n input1_space: Vector space containing the input x.\n input2_space: Vector space containing the input y.\n output_space: Vector space to write outputs to.\n operation: A function operating on basis directions.\n one_space: a reserved 1-d space that always contains a 1.\n hidden_name: Name for hidden dimensions.\n \"\"\"\n bases.ensure_dims(one_space, num_dims=1, name=\"one_space\")\n\n if not set(input1_space.basis).isdisjoint(input2_space.basis):\n raise ValueError(\"Input spaces to a SequenceMap must be disjoint. \"\n \"If input spaces are the same, use Map instead!\")\n\n input_space = bases.direct_sum(input1_space, input2_space, one_space)\n\n def to_hidden(x, y):\n return bases.BasisDirection(hidden_name, (x.name, x.value, y.name, y.value))\n\n def from_hidden(h):", "metadata": {"task_id": "deepmind--tracr/85", "ground_truth": " x_name, x_value, y_name, y_value = h.value\n x_dir = bases.BasisDirection(x_name, x_value)\n y_dir = bases.BasisDirection(y_name, y_value)\n return x_dir, y_dir\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "chamber", "categorical_mlp.py"], "context_start_lineno": 0, "lineno": 126, "function_name": "from_hidden"}, "groundtruth": " x_name, x_value, y_name, y_value = h.value\n x_dir = bases.BasisDirection(x_name, x_value)\n y_dir = bases.BasisDirection(y_name, y_value)\n return x_dir, y_dir\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
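The `to_hidden`/`from_hidden` pair in the `sequence_map_categorical_mlp` records round-trips a pair of input directions through a single tuple-valued hidden direction, which is what lets the output layer recover (x, y) from each hidden unit. A stand-alone analogue with a hypothetical namedtuple in place of tracr's `BasisDirection`:

from collections import namedtuple

# Hypothetical stand-in for tracr's BasisDirection (name + optional value).
Direction = namedtuple("Direction", ["name", "value"])

def to_hidden(x, y, hidden_name="__hidden__"):
    # Pack both directions into the value of one hidden direction.
    return Direction(hidden_name, (x.name, x.value, y.name, y.value))

def from_hidden(h):
    # Unpack the tuple back into the two original directions.
    x_name, x_value, y_name, y_value = h.value
    return Direction(x_name, x_value), Direction(y_name, y_value)

x, y = Direction("tokens", "a"), Direction("positions", 1)
assert from_hidden(to_hidden(x, y)) == (x, y)  # lossless round trip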
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"MLP to compute basic linear functions of one-hot encoded integers.\"\"\"\n\nfrom typing import Callable\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\n\n_ONE_SPACE = bases.VectorSpaceWithBasis.from_names([\"one\"])\n\n\ndef map_categorical_mlp(\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n operation: Callable[[bases.BasisDirection], bases.BasisDirection],\n) -> transformers.MLP:\n \"\"\"Returns an MLP that encodes any categorical function of a single variable f(x).\n\n The hidden layer is the identity and output combines this with a lookup table\n output_k = sum(f(i)*input_i for all i in input space)\n\n Args:\n input_space: space containing the input x.\n output_space: space containing possible outputs.\n operation: A function operating on basis directions.\n \"\"\"\n\n def operation_fn(direction):\n if direction in input_space:\n output_direction = operation(direction)\n if output_direction in output_space:\n return output_space.vector_from_basis_direction(output_direction)\n return output_space.null_vector()\n\n first_layer = vectorspace_fns.Linear.from_action(input_space, output_space,\n operation_fn)\n\n second_layer = vectorspace_fns.project(output_space, output_space)\n\n return transformers.MLP(first_layer, second_layer)\n\n\ndef map_categorical_to_numerical_mlp(\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n operation: Callable[[bases.Value], float],\n) -> transformers.MLP:\n \"\"\"Returns an MLP to compute f(x) from a categorical to a numerical variable.\n\n The hidden layer is the identity and output combines this with a lookup table\n output = sum(f(i)*input_i for all i in input space)\n\n Args:\n input_space: Vector space containing the input x.\n output_space: Vector space to write the numerical output to.\n operation: A function operating on basis directions.\n \"\"\"\n bases.ensure_dims(output_space, num_dims=1, name=\"output_space\")\n out_vec = output_space.vector_from_basis_direction(output_space.basis[0])\n\n def operation_fn(direction):\n if direction in input_space:\n return operation(direction.value) * out_vec\n return output_space.null_vector()\n\n first_layer = vectorspace_fns.Linear.from_action(input_space, output_space,\n operation_fn)\n\n second_layer = vectorspace_fns.project(output_space, output_space)\n\n return transformers.MLP(first_layer, second_layer)\n\n\ndef sequence_map_categorical_mlp(\n input1_space: bases.VectorSpaceWithBasis,\n input2_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n operation: Callable[[bases.BasisDirection, bases.BasisDirection],\n bases.BasisDirection],\n one_space: bases.VectorSpaceWithBasis = _ONE_SPACE,\n hidden_name: bases.Name = \"__hidden__\",\n) -> 
transformers.MLP:\n \"\"\"Returns an MLP that encodes a categorical function of two variables f(x, y).\n\n The hidden layer of the MLP computes the logical and of all input directions\n hidden_i_j = ReLU(x_i+x_j-1)\n\n And the output combines this with a lookup table\n output_k = sum(f(i, j)*hidden_i_j for all i,j in input space)\n\n Args:\n input1_space: Vector space containing the input x.\n input2_space: Vector space containing the input y.\n output_space: Vector space to write outputs to.\n operation: A function operating on basis directions.\n one_space: a reserved 1-d space that always contains a 1.\n hidden_name: Name for hidden dimensions.\n \"\"\"\n bases.ensure_dims(one_space, num_dims=1, name=\"one_space\")\n\n if not set(input1_space.basis).isdisjoint(input2_space.basis):\n raise ValueError(\"Input spaces to a SequenceMap must be disjoint. \"\n \"If input spaces are the same, use Map instead!\")\n\n input_space = bases.direct_sum(input1_space, input2_space, one_space)\n\n def to_hidden(x, y):\n return bases.BasisDirection(hidden_name, (x.name, x.value, y.name, y.value))\n\n def from_hidden(h):\n x_name, x_value, y_name, y_value = h.value\n x_dir = bases.BasisDirection(x_name, x_value)\n y_dir = bases.BasisDirection(y_name, y_value)\n return x_dir, y_dir\n\n hidden_dir = []\n for dir1 in input1_space.basis:\n for dir2 in input2_space.basis:\n hidden_dir.append(to_hidden(dir1, dir2))\n hidden_space = bases.VectorSpaceWithBasis(hidden_dir)\n\n def logical_and(direction):", "metadata": {"task_id": "deepmind--tracr/86", "ground_truth": " if direction in one_space:\n out = bases.VectorInBasis(hidden_space.basis,\n -np.ones(hidden_space.num_dims))\n elif direction in input1_space:\n dir1 = direction\n out = hidden_space.null_vector()\n for dir2 in input2_space.basis:\n out += hidden_space.vector_from_basis_direction(to_hidden(dir1, dir2))\n else:\n dir2 = direction\n out = hidden_space.null_vector()\n for dir1 in input1_space.basis:\n out += hidden_space.vector_from_basis_direction(to_hidden(dir1, dir2))\n return out\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "chamber", "categorical_mlp.py"], "context_start_lineno": 0, "lineno": 138, "function_name": "logical_and"}, "groundtruth": " if direction in one_space:\n out = bases.VectorInBasis(hidden_space.basis,\n -np.ones(hidden_space.num_dims))\n elif direction in input1_space:\n dir1 = direction\n out = hidden_space.null_vector()\n for dir2 in input2_space.basis:\n out += hidden_space.vector_from_basis_direction(to_hidden(dir1, dir2))\n else:\n dir2 = direction\n out = hidden_space.null_vector()\n for dir1 in input1_space.basis:\n out += hidden_space.vector_from_basis_direction(to_hidden(dir1, dir2))\n return out\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"MLP to compute basic linear functions of one-hot encoded integers.\"\"\"\n\nfrom typing import Callable\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import transformers\nfrom tracr.craft import vectorspace_fns\n\n_ONE_SPACE = bases.VectorSpaceWithBasis.from_names([\"one\"])\n\n\ndef map_categorical_mlp(\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n operation: Callable[[bases.BasisDirection], bases.BasisDirection],\n) -> transformers.MLP:\n \"\"\"Returns an MLP that encodes any categorical function of a single variable f(x).\n\n The hidden layer is the identity and output combines this with a lookup table\n output_k = sum(f(i)*input_i for all i in input space)\n\n Args:\n input_space: space containing the input x.\n output_space: space containing possible outputs.\n operation: A function operating on basis directions.\n \"\"\"\n\n def operation_fn(direction):\n if direction in input_space:\n output_direction = operation(direction)\n if output_direction in output_space:\n return output_space.vector_from_basis_direction(output_direction)\n return output_space.null_vector()\n\n first_layer = vectorspace_fns.Linear.from_action(input_space, output_space,\n operation_fn)\n\n second_layer = vectorspace_fns.project(output_space, output_space)\n\n return transformers.MLP(first_layer, second_layer)\n\n\ndef map_categorical_to_numerical_mlp(\n input_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n operation: Callable[[bases.Value], float],\n) -> transformers.MLP:\n \"\"\"Returns an MLP to compute f(x) from a categorical to a numerical variable.\n\n The hidden layer is the identity and output combines this with a lookup table\n output = sum(f(i)*input_i for all i in input space)\n\n Args:\n input_space: Vector space containing the input x.\n output_space: Vector space to write the numerical output to.\n operation: A function operating on basis directions.\n \"\"\"\n bases.ensure_dims(output_space, num_dims=1, name=\"output_space\")\n out_vec = output_space.vector_from_basis_direction(output_space.basis[0])\n\n def operation_fn(direction):\n if direction in input_space:\n return operation(direction.value) * out_vec\n return output_space.null_vector()\n\n first_layer = vectorspace_fns.Linear.from_action(input_space, output_space,\n operation_fn)\n\n second_layer = vectorspace_fns.project(output_space, output_space)\n\n return transformers.MLP(first_layer, second_layer)\n\n\ndef sequence_map_categorical_mlp(\n input1_space: bases.VectorSpaceWithBasis,\n input2_space: bases.VectorSpaceWithBasis,\n output_space: bases.VectorSpaceWithBasis,\n operation: Callable[[bases.BasisDirection, bases.BasisDirection],\n bases.BasisDirection],\n one_space: bases.VectorSpaceWithBasis = _ONE_SPACE,\n hidden_name: bases.Name = \"__hidden__\",\n) -> 
transformers.MLP:\n \"\"\"Returns an MLP that encodes a categorical function of two variables f(x, y).\n\n The hidden layer of the MLP computes the logical and of all input directions\n hidden_i_j = ReLU(x_i+x_j-1)\n\n And the output combines this with a lookup table\n output_k = sum(f(i, j)*hidden_i_j for all i,j in input space)\n\n Args:\n input1_space: Vector space containing the input x.\n input2_space: Vector space containing the input y.\n output_space: Vector space to write outputs to.\n operation: A function operating on basis directions.\n one_space: a reserved 1-d space that always contains a 1.\n hidden_name: Name for hidden dimensions.\n \"\"\"\n bases.ensure_dims(one_space, num_dims=1, name=\"one_space\")\n\n if not set(input1_space.basis).isdisjoint(input2_space.basis):\n raise ValueError(\"Input spaces to a SequenceMap must be disjoint. \"\n \"If input spaces are the same, use Map instead!\")\n\n input_space = bases.direct_sum(input1_space, input2_space, one_space)\n\n def to_hidden(x, y):\n return bases.BasisDirection(hidden_name, (x.name, x.value, y.name, y.value))\n\n def from_hidden(h):\n x_name, x_value, y_name, y_value = h.value\n x_dir = bases.BasisDirection(x_name, x_value)\n y_dir = bases.BasisDirection(y_name, y_value)\n return x_dir, y_dir\n\n hidden_dir = []\n for dir1 in input1_space.basis:\n for dir2 in input2_space.basis:\n hidden_dir.append(to_hidden(dir1, dir2))\n hidden_space = bases.VectorSpaceWithBasis(hidden_dir)\n\n def logical_and(direction):\n if direction in one_space:\n out = bases.VectorInBasis(hidden_space.basis,\n -np.ones(hidden_space.num_dims))\n elif direction in input1_space:\n dir1 = direction\n out = hidden_space.null_vector()\n for dir2 in input2_space.basis:\n out += hidden_space.vector_from_basis_direction(to_hidden(dir1, dir2))\n else:\n dir2 = direction\n out = hidden_space.null_vector()\n for dir1 in input1_space.basis:\n out += hidden_space.vector_from_basis_direction(to_hidden(dir1, dir2))\n return out\n\n first_layer = vectorspace_fns.Linear.from_action(input_space, hidden_space,\n logical_and)\n\n def operation_fn(direction):", "metadata": {"task_id": "deepmind--tracr/87", "ground_truth": " dir1, dir2 = from_hidden(direction)\n output_direction = operation(dir1, dir2)\n if output_direction in output_space:\n return output_space.vector_from_basis_direction(output_direction)\n else:\n return output_space.null_vector()\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "chamber", "categorical_mlp.py"], "context_start_lineno": 0, "lineno": 157, "function_name": "operation_fn"}, "groundtruth": " dir1, dir2 = from_hidden(direction)\n output_direction = operation(dir1, dir2)\n if output_direction in output_space:\n return output_space.vector_from_basis_direction(output_direction)\n else:\n return output_space.null_vector()\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
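The hidden layer built by `logical_and` in these records computes a pairwise AND of one-hot inputs, hidden_ij = ReLU(x_i + y_j - 1), with the constant -1 contributed through the always-on `one` direction. A numerical check of that identity (the dimensions are arbitrary):

import numpy as np

def relu(z):
    return np.maximum(z, 0.0)

nx, ny = 3, 4
x = np.eye(nx)[1]  # one-hot vector from the first input space
y = np.eye(ny)[2]  # one-hot vector from the second input space

# hidden[i, j] = ReLU(x[i] + y[j] - 1) is 1 exactly where both inputs are on.
hidden = relu(x[:, None] + y[None, :] - 1.0)
assert hidden[1, 2] == 1.0 and hidden.sum() == 1.0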
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions on vector spaces.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Callable, Sequence\n\nimport numpy as np\n\nfrom tracr.craft import bases\n\nVectorSpaceWithBasis = bases.VectorSpaceWithBasis\nVectorInBasis = bases.VectorInBasis\nBasisDirection = bases.BasisDirection\n\n\nclass VectorFunction(abc.ABC):\n \"\"\"A function that acts on vectors.\"\"\"\n\n input_space: VectorSpaceWithBasis\n output_space: VectorSpaceWithBasis\n\n @abc.abstractmethod\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n \"\"\"Evaluates the function.\"\"\"\n\n\nclass Linear(VectorFunction):\n \"\"\"A linear function.\"\"\"\n\n def __init__(\n self,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n matrix: np.ndarray,\n ):\n \"\"\"Initialises.\n\n Args:\n input_space: The input vector space.\n output_space: The output vector space.\n matrix: a [input, output] matrix acting in a (sorted) basis.\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/88", "ground_truth": " self.input_space = input_space\n self.output_space = output_space\n self.matrix = matrix\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "vectorspace_fns.py"], "context_start_lineno": 0, "lineno": 56, "function_name": "__init__"}, "groundtruth": " self.input_space = input_space\n self.output_space = output_space\n self.matrix = matrix\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions on vector spaces.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Callable, Sequence\n\nimport numpy as np\n\nfrom tracr.craft import bases\n\nVectorSpaceWithBasis = bases.VectorSpaceWithBasis\nVectorInBasis = bases.VectorInBasis\nBasisDirection = bases.BasisDirection\n\n\nclass VectorFunction(abc.ABC):\n \"\"\"A function that acts on vectors.\"\"\"\n\n input_space: VectorSpaceWithBasis\n output_space: VectorSpaceWithBasis\n\n @abc.abstractmethod\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n \"\"\"Evaluates the function.\"\"\"\n\n\nclass Linear(VectorFunction):\n \"\"\"A linear function.\"\"\"\n\n def __init__(\n self,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n matrix: np.ndarray,\n ):\n \"\"\"Initialises.\n\n Args:\n input_space: The input vector space.\n output_space: The output vector space.\n matrix: a [input, output] matrix acting in a (sorted) basis.\n \"\"\"\n self.input_space = input_space\n self.output_space = output_space\n self.matrix = matrix\n\n def __post_init__(self) -> None:\n output_size, input_size = self.matrix.shape\n assert input_size == self.input_space.num_dims\n assert output_size == self.output_space.num_dims\n\n def __call__(self, x: VectorInBasis) -> VectorInBasis:", "metadata": {"task_id": "deepmind--tracr/89", "ground_truth": " if x not in self.input_space:\n raise TypeError(f\"x={x} not in self.input_space={self.input_space}.\")\n return VectorInBasis(\n basis_directions=sorted(self.output_space.basis),\n magnitudes=x.magnitudes @ self.matrix,\n )\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "vectorspace_fns.py"], "context_start_lineno": 0, "lineno": 66, "function_name": "__call__"}, "groundtruth": " if x not in self.input_space:\n raise TypeError(f\"x={x} not in self.input_space={self.input_space}.\")\n return VectorInBasis(\n basis_directions=sorted(self.output_space.basis),\n magnitudes=x.magnitudes @ self.matrix,\n )\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
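`Linear.__call__` in these records applies the map as `x.magnitudes @ self.matrix`, i.e. a row vector times an [input, output] matrix, so composing two maps is plain left-to-right matrix multiplication. A quick NumPy check of that convention (shapes chosen arbitrarily):

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(3, 5))  # an [input, output] matrix, as in Linear
B = rng.normal(size=(5, 2))  # a second map applied afterwards

x = rng.normal(size=3)       # magnitudes of a vector in the input basis
# Row-vector convention: applying A then B equals applying A @ B once.
np.testing.assert_allclose((x @ A) @ B, x @ (A @ B))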
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions on vector spaces.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Callable, Sequence\n\nimport numpy as np\n\nfrom tracr.craft import bases\n\nVectorSpaceWithBasis = bases.VectorSpaceWithBasis\nVectorInBasis = bases.VectorInBasis\nBasisDirection = bases.BasisDirection\n\n\nclass VectorFunction(abc.ABC):\n \"\"\"A function that acts on vectors.\"\"\"\n\n input_space: VectorSpaceWithBasis\n output_space: VectorSpaceWithBasis\n\n @abc.abstractmethod\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n \"\"\"Evaluates the function.\"\"\"\n\n\nclass Linear(VectorFunction):\n \"\"\"A linear function.\"\"\"\n\n def __init__(\n self,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n matrix: np.ndarray,\n ):\n \"\"\"Initialises.\n\n Args:\n input_space: The input vector space.\n output_space: The output vector space.\n matrix: a [input, output] matrix acting in a (sorted) basis.\n \"\"\"\n self.input_space = input_space\n self.output_space = output_space\n self.matrix = matrix\n\n def __post_init__(self) -> None:\n output_size, input_size = self.matrix.shape\n assert input_size == self.input_space.num_dims\n assert output_size == self.output_space.num_dims\n\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n if x not in self.input_space:\n raise TypeError(f\"x={x} not in self.input_space={self.input_space}.\")\n return VectorInBasis(\n basis_directions=sorted(self.output_space.basis),\n magnitudes=x.magnitudes @ self.matrix,\n )\n\n @classmethod\n def from_action(\n cls,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n action: Callable[[BasisDirection], VectorInBasis],\n ) -> \"Linear\":\n \"\"\"from_action(i, o)(action) creates a Linear.\"\"\"", "metadata": {"task_id": "deepmind--tracr/90", "ground_truth": " matrix = np.zeros((input_space.num_dims, output_space.num_dims))\n for i, direction in enumerate(input_space.basis):\n out_vector = action(direction)\n if out_vector not in output_space:\n raise TypeError(f\"image of {direction} from input_space={input_space} \"\n f\"is not in output_space={output_space}\")\n matrix[i, :] = out_vector.magnitudes\n\n return Linear(input_space, output_space, matrix)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "vectorspace_fns.py"], "context_start_lineno": 0, "lineno": 82, "function_name": "from_action"}, "groundtruth": " matrix = np.zeros((input_space.num_dims, output_space.num_dims))\n for i, direction in enumerate(input_space.basis):\n out_vector = action(direction)\n if out_vector not in output_space:\n raise TypeError(f\"image of {direction} from input_space={input_space} \"\n f\"is not in output_space={output_space}\")\n matrix[i, :] = out_vector.magnitudes\n\n return Linear(input_space, output_space, matrix)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
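`Linear.from_action` materialises the matrix by probing the action on each input basis direction: row i is the image of direction i. The same idea with bare arrays (the `action` below is an arbitrary illustration, not from the dataset):

import numpy as np

def matrix_from_action(n_in, n_out, action):
    """Build an [n_in, n_out] matrix from action: index -> output vector."""
    matrix = np.zeros((n_in, n_out))
    for i in range(n_in):
        matrix[i, :] = action(i)  # image of the i-th basis direction
    return matrix

# Arbitrary action: direction i maps to 2 * e_{i mod 3}.
M = matrix_from_action(4, 3, lambda i: 2.0 * np.eye(3)[i % 3])
print(np.eye(4)[3] @ M)  # [2. 0. 0.]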
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions on vector spaces.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Callable, Sequence\n\nimport numpy as np\n\nfrom tracr.craft import bases\n\nVectorSpaceWithBasis = bases.VectorSpaceWithBasis\nVectorInBasis = bases.VectorInBasis\nBasisDirection = bases.BasisDirection\n\n\nclass VectorFunction(abc.ABC):\n \"\"\"A function that acts on vectors.\"\"\"\n\n input_space: VectorSpaceWithBasis\n output_space: VectorSpaceWithBasis\n\n @abc.abstractmethod\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n \"\"\"Evaluates the function.\"\"\"\n\n\nclass Linear(VectorFunction):\n \"\"\"A linear function.\"\"\"\n\n def __init__(\n self,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n matrix: np.ndarray,\n ):\n \"\"\"Initialises.\n\n Args:\n input_space: The input vector space.\n output_space: The output vector space.\n matrix: a [input, output] matrix acting in a (sorted) basis.\n \"\"\"\n self.input_space = input_space\n self.output_space = output_space\n self.matrix = matrix\n\n def __post_init__(self) -> None:\n output_size, input_size = self.matrix.shape\n assert input_size == self.input_space.num_dims\n assert output_size == self.output_space.num_dims\n\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n if x not in self.input_space:\n raise TypeError(f\"x={x} not in self.input_space={self.input_space}.\")\n return VectorInBasis(\n basis_directions=sorted(self.output_space.basis),\n magnitudes=x.magnitudes @ self.matrix,\n )\n\n @classmethod\n def from_action(\n cls,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n action: Callable[[BasisDirection], VectorInBasis],\n ) -> \"Linear\":\n \"\"\"from_action(i, o)(action) creates a Linear.\"\"\"\n\n matrix = np.zeros((input_space.num_dims, output_space.num_dims))\n for i, direction in enumerate(input_space.basis):\n out_vector = action(direction)\n if out_vector not in output_space:\n raise TypeError(f\"image of {direction} from input_space={input_space} \"\n f\"is not in output_space={output_space}\")\n matrix[i, :] = out_vector.magnitudes\n\n return Linear(input_space, output_space, matrix)\n\n @classmethod\n def combine_in_parallel(cls, fns: Sequence[\"Linear\"]) -> \"Linear\":\n \"\"\"Combines multiple parallel linear functions into a single one.\"\"\"", "metadata": {"task_id": "deepmind--tracr/91", "ground_truth": " joint_input_space = bases.join_vector_spaces(\n *[fn.input_space for fn in fns])\n joint_output_space = bases.join_vector_spaces(\n *[fn.output_space for fn in fns])\n\n def action(x: bases.BasisDirection) -> bases.VectorInBasis:\n out = joint_output_space.null_vector()\n for fn in fns:\n if x in fn.input_space:\n x_vec = fn.input_space.vector_from_basis_direction(x)\n out += fn(x_vec).project(joint_output_space)\n return out\n\n return cls.from_action(joint_input_space, 
joint_output_space, action)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "vectorspace_fns.py"], "context_start_lineno": 0, "lineno": 95, "function_name": "combine_in_parallel"}, "groundtruth": " joint_input_space = bases.join_vector_spaces(\n *[fn.input_space for fn in fns])\n joint_output_space = bases.join_vector_spaces(\n *[fn.output_space for fn in fns])\n\n def action(x: bases.BasisDirection) -> bases.VectorInBasis:\n out = joint_output_space.null_vector()\n for fn in fns:\n if x in fn.input_space:\n x_vec = fn.input_space.vector_from_basis_direction(x)\n out += fn(x_vec).project(joint_output_space)\n return out\n\n return cls.from_action(joint_input_space, joint_output_space, action)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions on vector spaces.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Callable, Sequence\n\nimport numpy as np\n\nfrom tracr.craft import bases\n\nVectorSpaceWithBasis = bases.VectorSpaceWithBasis\nVectorInBasis = bases.VectorInBasis\nBasisDirection = bases.BasisDirection\n\n\nclass VectorFunction(abc.ABC):\n \"\"\"A function that acts on vectors.\"\"\"\n\n input_space: VectorSpaceWithBasis\n output_space: VectorSpaceWithBasis\n\n @abc.abstractmethod\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n \"\"\"Evaluates the function.\"\"\"\n\n\nclass Linear(VectorFunction):\n \"\"\"A linear function.\"\"\"\n\n def __init__(\n self,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n matrix: np.ndarray,\n ):\n \"\"\"Initialises.\n\n Args:\n input_space: The input vector space.\n output_space: The output vector space.\n matrix: a [input, output] matrix acting in a (sorted) basis.\n \"\"\"\n self.input_space = input_space\n self.output_space = output_space\n self.matrix = matrix\n\n def __post_init__(self) -> None:\n output_size, input_size = self.matrix.shape\n assert input_size == self.input_space.num_dims\n assert output_size == self.output_space.num_dims\n\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n if x not in self.input_space:\n raise TypeError(f\"x={x} not in self.input_space={self.input_space}.\")\n return VectorInBasis(\n basis_directions=sorted(self.output_space.basis),\n magnitudes=x.magnitudes @ self.matrix,\n )\n\n @classmethod\n def from_action(\n cls,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n action: Callable[[BasisDirection], VectorInBasis],\n ) -> \"Linear\":\n \"\"\"from_action(i, o)(action) creates a Linear.\"\"\"\n\n matrix = np.zeros((input_space.num_dims, output_space.num_dims))\n for i, direction in enumerate(input_space.basis):\n out_vector = action(direction)\n if out_vector not in output_space:\n raise TypeError(f\"image of {direction} from input_space={input_space} \"\n f\"is not in output_space={output_space}\")\n matrix[i, :] = out_vector.magnitudes\n\n 
return Linear(input_space, output_space, matrix)\n\n @classmethod\n def combine_in_parallel(cls, fns: Sequence[\"Linear\"]) -> \"Linear\":\n \"\"\"Combines multiple parallel linear functions into a single one.\"\"\"\n joint_input_space = bases.join_vector_spaces(\n *[fn.input_space for fn in fns])\n joint_output_space = bases.join_vector_spaces(\n *[fn.output_space for fn in fns])\n\n def action(x: bases.BasisDirection) -> bases.VectorInBasis:", "metadata": {"task_id": "deepmind--tracr/92", "ground_truth": " out = joint_output_space.null_vector()\n for fn in fns:\n if x in fn.input_space:\n x_vec = fn.input_space.vector_from_basis_direction(x)\n out += fn(x_vec).project(joint_output_space)\n return out\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "vectorspace_fns.py"], "context_start_lineno": 0, "lineno": 101, "function_name": "action"}, "groundtruth": " out = joint_output_space.null_vector()\n for fn in fns:\n if x in fn.input_space:\n x_vec = fn.input_space.vector_from_basis_direction(x)\n out += fn(x_vec).project(joint_output_space)\n return out\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions on vector spaces.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Callable, Sequence\n\nimport numpy as np\n\nfrom tracr.craft import bases\n\nVectorSpaceWithBasis = bases.VectorSpaceWithBasis\nVectorInBasis = bases.VectorInBasis\nBasisDirection = bases.BasisDirection\n\n\nclass VectorFunction(abc.ABC):\n \"\"\"A function that acts on vectors.\"\"\"\n\n input_space: VectorSpaceWithBasis\n output_space: VectorSpaceWithBasis\n\n @abc.abstractmethod\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n \"\"\"Evaluates the function.\"\"\"\n\n\nclass Linear(VectorFunction):\n \"\"\"A linear function.\"\"\"\n\n def __init__(\n self,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n matrix: np.ndarray,\n ):\n \"\"\"Initialises.\n\n Args:\n input_space: The input vector space.\n output_space: The output vector space.\n matrix: a [input, output] matrix acting in a (sorted) basis.\n \"\"\"\n self.input_space = input_space\n self.output_space = output_space\n self.matrix = matrix\n\n def __post_init__(self) -> None:\n output_size, input_size = self.matrix.shape\n assert input_size == self.input_space.num_dims\n assert output_size == self.output_space.num_dims\n\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n if x not in self.input_space:\n raise TypeError(f\"x={x} not in self.input_space={self.input_space}.\")\n return VectorInBasis(\n basis_directions=sorted(self.output_space.basis),\n magnitudes=x.magnitudes @ self.matrix,\n )\n\n @classmethod\n def from_action(\n cls,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n action: Callable[[BasisDirection], VectorInBasis],\n ) -> \"Linear\":\n \"\"\"from_action(i, o)(action) creates a 
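`combine_in_parallel` embeds several linear maps into joint input/output spaces, summing images where input spaces overlap. With disjoint spaces the combined matrix is simply block-diagonal in the concatenated bases, which a small NumPy sketch makes concrete (the spaces here are plain index ranges, not tracr objects):

import numpy as np

A = np.array([[1.0, 0.0],
              [0.0, 2.0]])  # acts on joint directions 0-1
B = np.array([[3.0]])       # acts on joint direction 2

# Disjoint inputs and outputs: the parallel combination is block-diagonal.
joint = np.zeros((3, 3))
joint[:2, :2] = A
joint[2:, 2:] = B

x = np.array([1.0, 1.0, 1.0])  # a vector in the joint input space
print(x @ joint)               # [1. 2. 3.]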
Linear.\"\"\"\n\n matrix = np.zeros((input_space.num_dims, output_space.num_dims))\n for i, direction in enumerate(input_space.basis):\n out_vector = action(direction)\n if out_vector not in output_space:\n raise TypeError(f\"image of {direction} from input_space={input_space} \"\n f\"is not in output_space={output_space}\")\n matrix[i, :] = out_vector.magnitudes\n\n return Linear(input_space, output_space, matrix)\n\n @classmethod\n def combine_in_parallel(cls, fns: Sequence[\"Linear\"]) -> \"Linear\":\n \"\"\"Combines multiple parallel linear functions into a single one.\"\"\"\n joint_input_space = bases.join_vector_spaces(\n *[fn.input_space for fn in fns])\n joint_output_space = bases.join_vector_spaces(\n *[fn.output_space for fn in fns])\n\n def action(x: bases.BasisDirection) -> bases.VectorInBasis:\n out = joint_output_space.null_vector()\n for fn in fns:\n if x in fn.input_space:\n x_vec = fn.input_space.vector_from_basis_direction(x)\n out += fn(x_vec).project(joint_output_space)\n return out\n\n return cls.from_action(joint_input_space, joint_output_space, action)\n\n\ndef project(\n from_space: VectorSpaceWithBasis,\n to_space: VectorSpaceWithBasis,\n) -> Linear:\n \"\"\"Creates a projection.\"\"\"", "metadata": {"task_id": "deepmind--tracr/93", "ground_truth": " def action(direction: bases.BasisDirection) -> VectorInBasis:\n if direction in to_space:\n return to_space.vector_from_basis_direction(direction)\n else:\n return to_space.null_vector()\n\n return Linear.from_action(from_space, to_space, action=action)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "vectorspace_fns.py"], "context_start_lineno": 0, "lineno": 117, "function_name": "project"}, "groundtruth": " def action(direction: bases.BasisDirection) -> VectorInBasis:\n if direction in to_space:\n return to_space.vector_from_basis_direction(direction)\n else:\n return to_space.null_vector()\n\n return Linear.from_action(from_space, to_space, action=action)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions on vector spaces.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Callable, Sequence\n\nimport numpy as np\n\nfrom tracr.craft import bases\n\nVectorSpaceWithBasis = bases.VectorSpaceWithBasis\nVectorInBasis = bases.VectorInBasis\nBasisDirection = bases.BasisDirection\n\n\nclass VectorFunction(abc.ABC):\n \"\"\"A function that acts on vectors.\"\"\"\n\n input_space: VectorSpaceWithBasis\n output_space: VectorSpaceWithBasis\n\n @abc.abstractmethod\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n \"\"\"Evaluates the function.\"\"\"\n\n\nclass Linear(VectorFunction):\n \"\"\"A linear function.\"\"\"\n\n def __init__(\n self,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n matrix: np.ndarray,\n ):\n \"\"\"Initialises.\n\n Args:\n input_space: The input vector space.\n output_space: The output vector space.\n matrix: a [input, output] matrix acting in a (sorted) basis.\n \"\"\"\n self.input_space = input_space\n self.output_space = output_space\n self.matrix = matrix\n\n def __post_init__(self) -> None:\n output_size, input_size = self.matrix.shape\n assert input_size == self.input_space.num_dims\n assert output_size == self.output_space.num_dims\n\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n if x not in self.input_space:\n raise TypeError(f\"x={x} not in self.input_space={self.input_space}.\")\n return VectorInBasis(\n basis_directions=sorted(self.output_space.basis),\n magnitudes=x.magnitudes @ self.matrix,\n )\n\n @classmethod\n def from_action(\n cls,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n action: Callable[[BasisDirection], VectorInBasis],\n ) -> \"Linear\":\n \"\"\"from_action(i, o)(action) creates a Linear.\"\"\"\n\n matrix = np.zeros((input_space.num_dims, output_space.num_dims))\n for i, direction in enumerate(input_space.basis):\n out_vector = action(direction)\n if out_vector not in output_space:\n raise TypeError(f\"image of {direction} from input_space={input_space} \"\n f\"is not in output_space={output_space}\")\n matrix[i, :] = out_vector.magnitudes\n\n return Linear(input_space, output_space, matrix)\n\n @classmethod\n def combine_in_parallel(cls, fns: Sequence[\"Linear\"]) -> \"Linear\":\n \"\"\"Combines multiple parallel linear functions into a single one.\"\"\"\n joint_input_space = bases.join_vector_spaces(\n *[fn.input_space for fn in fns])\n joint_output_space = bases.join_vector_spaces(\n *[fn.output_space for fn in fns])\n\n def action(x: bases.BasisDirection) -> bases.VectorInBasis:\n out = joint_output_space.null_vector()\n for fn in fns:\n if x in fn.input_space:\n x_vec = fn.input_space.vector_from_basis_direction(x)\n out += fn(x_vec).project(joint_output_space)\n return out\n\n return cls.from_action(joint_input_space, joint_output_space, action)\n\n\ndef project(\n from_space: 
VectorSpaceWithBasis,\n to_space: VectorSpaceWithBasis,\n) -> Linear:\n \"\"\"Creates a projection.\"\"\"\n\n def action(direction: bases.BasisDirection) -> VectorInBasis:", "metadata": {"task_id": "deepmind--tracr/94", "ground_truth": " if direction in to_space:\n return to_space.vector_from_basis_direction(direction)\n else:\n return to_space.null_vector()\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "vectorspace_fns.py"], "context_start_lineno": 0, "lineno": 118, "function_name": "action"}, "groundtruth": " if direction in to_space:\n return to_space.vector_from_basis_direction(direction)\n else:\n return to_space.null_vector()\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions on vector spaces.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Callable, Sequence\n\nimport numpy as np\n\nfrom tracr.craft import bases\n\nVectorSpaceWithBasis = bases.VectorSpaceWithBasis\nVectorInBasis = bases.VectorInBasis\nBasisDirection = bases.BasisDirection\n\n\nclass VectorFunction(abc.ABC):\n \"\"\"A function that acts on vectors.\"\"\"\n\n input_space: VectorSpaceWithBasis\n output_space: VectorSpaceWithBasis\n\n @abc.abstractmethod\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n \"\"\"Evaluates the function.\"\"\"\n\n\nclass Linear(VectorFunction):\n \"\"\"A linear function.\"\"\"\n\n def __init__(\n self,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n matrix: np.ndarray,\n ):\n \"\"\"Initialises.\n\n Args:\n input_space: The input vector space.\n output_space: The output vector space.\n matrix: a [input, output] matrix acting in a (sorted) basis.\n \"\"\"\n self.input_space = input_space\n self.output_space = output_space\n self.matrix = matrix\n\n def __post_init__(self) -> None:\n output_size, input_size = self.matrix.shape\n assert input_size == self.input_space.num_dims\n assert output_size == self.output_space.num_dims\n\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n if x not in self.input_space:\n raise TypeError(f\"x={x} not in self.input_space={self.input_space}.\")\n return VectorInBasis(\n basis_directions=sorted(self.output_space.basis),\n magnitudes=x.magnitudes @ self.matrix,\n )\n\n @classmethod\n def from_action(\n cls,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n action: Callable[[BasisDirection], VectorInBasis],\n ) -> \"Linear\":\n \"\"\"from_action(i, o)(action) creates a Linear.\"\"\"\n\n matrix = np.zeros((input_space.num_dims, output_space.num_dims))\n for i, direction in enumerate(input_space.basis):\n out_vector = action(direction)\n if out_vector not in output_space:\n raise TypeError(f\"image of {direction} from input_space={input_space} \"\n f\"is not in output_space={output_space}\")\n matrix[i, :] = out_vector.magnitudes\n\n return Linear(input_space, output_space, matrix)\n\n 
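`project` maps each direction to itself when the target space contains it and to the null vector otherwise, so its matrix has a 1 exactly where an input and an output direction coincide. Sketch over named directions (the names are illustrative):

import numpy as np

from_basis = ["a", "b", "c"]
to_basis = ["b", "c", "d"]

# P[i, j] = 1 iff from_basis[i] and to_basis[j] are the same direction.
P = np.array([[1.0 if f == t else 0.0 for t in to_basis]
              for f in from_basis])

x = np.array([5.0, 6.0, 7.0])  # magnitudes over from_basis
print(x @ P)                   # [6. 7. 0.]; "a" is dropped, "d" stays null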
@classmethod\n def combine_in_parallel(cls, fns: Sequence[\"Linear\"]) -> \"Linear\":\n \"\"\"Combines multiple parallel linear functions into a single one.\"\"\"\n joint_input_space = bases.join_vector_spaces(\n *[fn.input_space for fn in fns])\n joint_output_space = bases.join_vector_spaces(\n *[fn.output_space for fn in fns])\n\n def action(x: bases.BasisDirection) -> bases.VectorInBasis:\n out = joint_output_space.null_vector()\n for fn in fns:\n if x in fn.input_space:\n x_vec = fn.input_space.vector_from_basis_direction(x)\n out += fn(x_vec).project(joint_output_space)\n return out\n\n return cls.from_action(joint_input_space, joint_output_space, action)\n\n\ndef project(\n from_space: VectorSpaceWithBasis,\n to_space: VectorSpaceWithBasis,\n) -> Linear:\n \"\"\"Creates a projection.\"\"\"\n\n def action(direction: bases.BasisDirection) -> VectorInBasis:\n if direction in to_space:\n return to_space.vector_from_basis_direction(direction)\n else:\n return to_space.null_vector()\n\n return Linear.from_action(from_space, to_space, action=action)\n\n\n@dataclasses.dataclass\nclass ScalarBilinear:\n \"\"\"A scalar-valued bilinear operator.\"\"\"\n left_space: VectorSpaceWithBasis\n right_space: VectorSpaceWithBasis\n matrix: np.ndarray\n\n def __post_init__(self):\n \"\"\"Ensure matrix acts in sorted bases and typecheck sizes.\"\"\"", "metadata": {"task_id": "deepmind--tracr/95", "ground_truth": " left_size, right_size = self.matrix.shape\n assert left_size == self.left_space.num_dims\n assert right_size == self.right_space.num_dims\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "vectorspace_fns.py"], "context_start_lineno": 0, "lineno": 135, "function_name": "__post_init__"}, "groundtruth": " left_size, right_size = self.matrix.shape\n assert left_size == self.left_space.num_dims\n assert right_size == self.right_space.num_dims\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions on vector spaces.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Callable, Sequence\n\nimport numpy as np\n\nfrom tracr.craft import bases\n\nVectorSpaceWithBasis = bases.VectorSpaceWithBasis\nVectorInBasis = bases.VectorInBasis\nBasisDirection = bases.BasisDirection\n\n\nclass VectorFunction(abc.ABC):\n \"\"\"A function that acts on vectors.\"\"\"\n\n input_space: VectorSpaceWithBasis\n output_space: VectorSpaceWithBasis\n\n @abc.abstractmethod\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n \"\"\"Evaluates the function.\"\"\"\n\n\nclass Linear(VectorFunction):\n \"\"\"A linear function.\"\"\"\n\n def __init__(\n self,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n matrix: np.ndarray,\n ):\n \"\"\"Initialises.\n\n Args:\n input_space: The input vector space.\n output_space: The output vector space.\n matrix: a [input, output] matrix acting in a (sorted) basis.\n \"\"\"\n self.input_space = input_space\n self.output_space = output_space\n self.matrix = matrix\n\n def __post_init__(self) -> None:\n output_size, input_size = self.matrix.shape\n assert input_size == self.input_space.num_dims\n assert output_size == self.output_space.num_dims\n\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n if x not in self.input_space:\n raise TypeError(f\"x={x} not in self.input_space={self.input_space}.\")\n return VectorInBasis(\n basis_directions=sorted(self.output_space.basis),\n magnitudes=x.magnitudes @ self.matrix,\n )\n\n @classmethod\n def from_action(\n cls,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n action: Callable[[BasisDirection], VectorInBasis],\n ) -> \"Linear\":\n \"\"\"from_action(i, o)(action) creates a Linear.\"\"\"\n\n matrix = np.zeros((input_space.num_dims, output_space.num_dims))\n for i, direction in enumerate(input_space.basis):\n out_vector = action(direction)\n if out_vector not in output_space:\n raise TypeError(f\"image of {direction} from input_space={input_space} \"\n f\"is not in output_space={output_space}\")\n matrix[i, :] = out_vector.magnitudes\n\n return Linear(input_space, output_space, matrix)\n\n @classmethod\n def combine_in_parallel(cls, fns: Sequence[\"Linear\"]) -> \"Linear\":\n \"\"\"Combines multiple parallel linear functions into a single one.\"\"\"\n joint_input_space = bases.join_vector_spaces(\n *[fn.input_space for fn in fns])\n joint_output_space = bases.join_vector_spaces(\n *[fn.output_space for fn in fns])\n\n def action(x: bases.BasisDirection) -> bases.VectorInBasis:\n out = joint_output_space.null_vector()\n for fn in fns:\n if x in fn.input_space:\n x_vec = fn.input_space.vector_from_basis_direction(x)\n out += fn(x_vec).project(joint_output_space)\n return out\n\n return cls.from_action(joint_input_space, joint_output_space, action)\n\n\ndef project(\n from_space: 
VectorSpaceWithBasis,\n to_space: VectorSpaceWithBasis,\n) -> Linear:\n \"\"\"Creates a projection.\"\"\"\n\n def action(direction: bases.BasisDirection) -> VectorInBasis:\n if direction in to_space:\n return to_space.vector_from_basis_direction(direction)\n else:\n return to_space.null_vector()\n\n return Linear.from_action(from_space, to_space, action=action)\n\n\n@dataclasses.dataclass\nclass ScalarBilinear:\n \"\"\"A scalar-valued bilinear operator.\"\"\"\n left_space: VectorSpaceWithBasis\n right_space: VectorSpaceWithBasis\n matrix: np.ndarray\n\n def __post_init__(self):\n \"\"\"Ensure matrix acts in sorted bases and typecheck sizes.\"\"\"\n left_size, right_size = self.matrix.shape\n assert left_size == self.left_space.num_dims\n assert right_size == self.right_space.num_dims\n\n def __call__(self, x: VectorInBasis, y: VectorInBasis) -> float:\n \"\"\"Describes the action of the operator on vectors.\"\"\"", "metadata": {"task_id": "deepmind--tracr/96", "ground_truth": " if x not in self.left_space:\n raise TypeError(f\"x={x} not in self.left_space={self.left_space}.\")\n if y not in self.right_space:\n raise TypeError(f\"y={y} not in self.right_space={self.right_space}.\")\n return (x.magnitudes.T @ self.matrix @ y.magnitudes).item()\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "vectorspace_fns.py"], "context_start_lineno": 0, "lineno": 141, "function_name": "__call__"}, "groundtruth": " if x not in self.left_space:\n raise TypeError(f\"x={x} not in self.left_space={self.left_space}.\")\n if y not in self.right_space:\n raise TypeError(f\"y={y} not in self.right_space={self.right_space}.\")\n return (x.magnitudes.T @ self.matrix @ y.magnitudes).item()\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functions on vector spaces.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Callable, Sequence\n\nimport numpy as np\n\nfrom tracr.craft import bases\n\nVectorSpaceWithBasis = bases.VectorSpaceWithBasis\nVectorInBasis = bases.VectorInBasis\nBasisDirection = bases.BasisDirection\n\n\nclass VectorFunction(abc.ABC):\n \"\"\"A function that acts on vectors.\"\"\"\n\n input_space: VectorSpaceWithBasis\n output_space: VectorSpaceWithBasis\n\n @abc.abstractmethod\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n \"\"\"Evaluates the function.\"\"\"\n\n\nclass Linear(VectorFunction):\n \"\"\"A linear function.\"\"\"\n\n def __init__(\n self,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n matrix: np.ndarray,\n ):\n \"\"\"Initialises.\n\n Args:\n input_space: The input vector space.\n output_space: The output vector space.\n matrix: a [input, output] matrix acting in a (sorted) basis.\n \"\"\"\n self.input_space = input_space\n self.output_space = output_space\n self.matrix = matrix\n\n def __post_init__(self) -> None:\n output_size, input_size = self.matrix.shape\n 
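`ScalarBilinear` evaluates x^T M y, and its `from_action` fills M entry by entry by probing pairs of basis directions. A NumPy check that the two views agree (the action is an arbitrary assumption):

import numpy as np

n_left, n_right = 3, 2

def action(i, j):  # arbitrary illustrative action on direction indices
    return float(i + 10 * j)

# from_action-style construction: one matrix entry per direction pair.
M = np.array([[action(i, j) for j in range(n_right)]
              for i in range(n_left)])

x, y = np.eye(n_left)[2], np.eye(n_right)[1]  # basis vectors e_2 and e_1
assert (x @ M @ y).item() == action(2, 1)     # both give 12.0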
assert input_size == self.input_space.num_dims\n assert output_size == self.output_space.num_dims\n\n def __call__(self, x: VectorInBasis) -> VectorInBasis:\n if x not in self.input_space:\n raise TypeError(f\"x={x} not in self.input_space={self.input_space}.\")\n return VectorInBasis(\n basis_directions=sorted(self.output_space.basis),\n magnitudes=x.magnitudes @ self.matrix,\n )\n\n @classmethod\n def from_action(\n cls,\n input_space: VectorSpaceWithBasis,\n output_space: VectorSpaceWithBasis,\n action: Callable[[BasisDirection], VectorInBasis],\n ) -> \"Linear\":\n \"\"\"from_action(i, o)(action) creates a Linear.\"\"\"\n\n matrix = np.zeros((input_space.num_dims, output_space.num_dims))\n for i, direction in enumerate(input_space.basis):\n out_vector = action(direction)\n if out_vector not in output_space:\n raise TypeError(f\"image of {direction} from input_space={input_space} \"\n f\"is not in output_space={output_space}\")\n matrix[i, :] = out_vector.magnitudes\n\n return Linear(input_space, output_space, matrix)\n\n @classmethod\n def combine_in_parallel(cls, fns: Sequence[\"Linear\"]) -> \"Linear\":\n \"\"\"Combines multiple parallel linear functions into a single one.\"\"\"\n joint_input_space = bases.join_vector_spaces(\n *[fn.input_space for fn in fns])\n joint_output_space = bases.join_vector_spaces(\n *[fn.output_space for fn in fns])\n\n def action(x: bases.BasisDirection) -> bases.VectorInBasis:\n out = joint_output_space.null_vector()\n for fn in fns:\n if x in fn.input_space:\n x_vec = fn.input_space.vector_from_basis_direction(x)\n out += fn(x_vec).project(joint_output_space)\n return out\n\n return cls.from_action(joint_input_space, joint_output_space, action)\n\n\ndef project(\n from_space: VectorSpaceWithBasis,\n to_space: VectorSpaceWithBasis,\n) -> Linear:\n \"\"\"Creates a projection.\"\"\"\n\n def action(direction: bases.BasisDirection) -> VectorInBasis:\n if direction in to_space:\n return to_space.vector_from_basis_direction(direction)\n else:\n return to_space.null_vector()\n\n return Linear.from_action(from_space, to_space, action=action)\n\n\n@dataclasses.dataclass\nclass ScalarBilinear:\n \"\"\"A scalar-valued bilinear operator.\"\"\"\n left_space: VectorSpaceWithBasis\n right_space: VectorSpaceWithBasis\n matrix: np.ndarray\n\n def __post_init__(self):\n \"\"\"Ensure matrix acts in sorted bases and typecheck sizes.\"\"\"\n left_size, right_size = self.matrix.shape\n assert left_size == self.left_space.num_dims\n assert right_size == self.right_space.num_dims\n\n def __call__(self, x: VectorInBasis, y: VectorInBasis) -> float:\n \"\"\"Describes the action of the operator on vectors.\"\"\"\n if x not in self.left_space:\n raise TypeError(f\"x={x} not in self.left_space={self.left_space}.\")\n if y not in self.right_space:\n raise TypeError(f\"y={y} not in self.right_space={self.right_space}.\")\n return (x.magnitudes.T @ self.matrix @ y.magnitudes).item()\n\n @classmethod\n def from_action(\n cls,\n left_space: VectorSpaceWithBasis,\n right_space: VectorSpaceWithBasis,\n action: Callable[[BasisDirection, BasisDirection], float],\n ) -> \"ScalarBilinear\":\n \"\"\"from_action(l, r)(action) creates a ScalarBilinear.\"\"\"", "metadata": {"task_id": "deepmind--tracr/97", "ground_truth": " matrix = np.zeros((left_space.num_dims, right_space.num_dims))\n for i, left_direction in enumerate(left_space.basis):\n for j, right_direction in enumerate(right_space.basis):\n matrix[i, j] = action(left_direction, right_direction)\n\n return ScalarBilinear(left_space, 
right_space, matrix)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "vectorspace_fns.py"], "context_start_lineno": 0, "lineno": 156, "function_name": "from_action"}, "groundtruth": " matrix = np.zeros((left_space.num_dims, right_space.num_dims))\n for i, left_direction in enumerate(left_space.basis):\n for j, right_direction in enumerate(right_space.basis):\n matrix[i, j] = action(left_direction, right_direction)\n\n return ScalarBilinear(left_space, right_space, matrix)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Vectors and bases.\"\"\"\n\nimport dataclasses\nfrom typing import Sequence, Union, Optional, Iterable\n\nimport numpy as np\n\nName = Union[int, str]\nValue = Union[int, float, bool, str, tuple]\n\n\n@dataclasses.dataclass(frozen=True)\nclass BasisDirection:\n \"\"\"Represents a basis direction (no magnitude) in a vector space.\n\n Attributes:\n name: a unique name for this direction.\n value: used to hold a value one-hot-encoded by this direction. e.g.,\n [BasisDirection(\"vs_1\", True), BasisDirection(\"vs_1\", False)] would be\n basis directions of a subspace called \"vs_1\" which one-hot-encodes the\n values True and False. If provided, considered part of the name for the\n purpose of disambiguating directions.\n \"\"\"\n name: Name\n value: Optional[Value] = None\n\n def __str__(self):", "metadata": {"task_id": "deepmind--tracr/98", "ground_truth": " if self.value is None:\n return str(self.name)\n return f\"{self.name}:{self.value}\"\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "bases.py"], "context_start_lineno": 0, "lineno": 41, "function_name": "__str__"}, "groundtruth": " if self.value is None:\n return str(self.name)\n return f\"{self.name}:{self.value}\"\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Vectors and bases.\"\"\"\n\nimport dataclasses\nfrom typing import Sequence, Union, Optional, Iterable\n\nimport numpy as np\n\nName = Union[int, str]\nValue = Union[int, float, bool, str, tuple]\n\n\n@dataclasses.dataclass(frozen=True)\nclass BasisDirection:\n \"\"\"Represents a basis direction (no magnitude) in a vector space.\n\n Attributes:\n name: a unique name for this direction.\n value: used to hold a value one-hot-encoded by this direction. e.g.,\n [BasisDirection(\"vs_1\", True), BasisDirection(\"vs_1\", False)] would be\n basis directions of a subspace called \"vs_1\" which one-hot-encodes the\n values True and False. If provided, considered part of the name for the\n purpose of disambiguating directions.\n \"\"\"\n name: Name\n value: Optional[Value] = None\n\n def __str__(self):\n if self.value is None:\n return str(self.name)\n return f\"{self.name}:{self.value}\"\n\n def __lt__(self, other: \"BasisDirection\") -> bool:", "metadata": {"task_id": "deepmind--tracr/99", "ground_truth": " try:\n return (self.name, self.value) < (other.name, other.value)\n except TypeError:\n return str(self) < str(other)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "bases.py"], "context_start_lineno": 0, "lineno": 46, "function_name": "__lt__"}, "groundtruth": " try:\n return (self.name, self.value) < (other.name, other.value)\n except TypeError:\n return str(self) < str(other)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Vectors and bases.\"\"\"\n\nimport dataclasses\nfrom typing import Sequence, Union, Optional, Iterable\n\nimport numpy as np\n\nName = Union[int, str]\nValue = Union[int, float, bool, str, tuple]\n\n\n@dataclasses.dataclass(frozen=True)\nclass BasisDirection:\n \"\"\"Represents a basis direction (no magnitude) in a vector space.\n\n Attributes:\n name: a unique name for this direction.\n value: used to hold a value one-hot-encoded by this direction. e.g.,\n [BasisDirection(\"vs_1\", True), BasisDirection(\"vs_1\", False)] would be\n basis directions of a subspace called \"vs_1\" which one-hot-encodes the\n values True and False. 
If provided, considered part of the name for the\n purpose of disambiguating directions.\n \"\"\"\n name: Name\n value: Optional[Value] = None\n\n def __str__(self):\n if self.value is None:\n return str(self.name)\n return f\"{self.name}:{self.value}\"\n\n def __lt__(self, other: \"BasisDirection\") -> bool:\n try:\n return (self.name, self.value) < (other.name, other.value)\n except TypeError:\n return str(self) < str(other)\n\n\n@dataclasses.dataclass\nclass VectorInBasis:\n \"\"\"A vector (or array of vectors) in a given basis.\n\n When magnitudes are 1-d, this is a vector.\n When magnitudes are (n+1)-d, this is an array of vectors,\n where the -1th dimension is the basis dimension.\n \"\"\"\n basis_directions: Sequence[BasisDirection]\n magnitudes: np.ndarray\n\n def __post_init__(self):\n \"\"\"Sort basis directions.\"\"\"", "metadata": {"task_id": "deepmind--tracr/100", "ground_truth": " if len(self.basis_directions) != self.magnitudes.shape[-1]:\n raise ValueError(\n \"Last dimension of magnitudes must be the same as number \"\n f\"of basis directions. Was {len(self.basis_directions)} \"\n f\"and {self.magnitudes.shape[-1]}.\")\n\n sort_idx = np.argsort(self.basis_directions)\n self.basis_directions = [self.basis_directions[i] for i in sort_idx]\n self.magnitudes = np.take(self.magnitudes, sort_idx, -1)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "bases.py"], "context_start_lineno": 0, "lineno": 65, "function_name": "__post_init__"}, "groundtruth": " if len(self.basis_directions) != self.magnitudes.shape[-1]:\n raise ValueError(\n \"Last dimension of magnitudes must be the same as number \"\n f\"of basis directions. Was {len(self.basis_directions)} \"\n f\"and {self.magnitudes.shape[-1]}.\")\n\n sort_idx = np.argsort(self.basis_directions)\n self.basis_directions = [self.basis_directions[i] for i in sort_idx]\n self.magnitudes = np.take(self.magnitudes, sort_idx, -1)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Vectors and bases.\"\"\"\n\nimport dataclasses\nfrom typing import Sequence, Union, Optional, Iterable\n\nimport numpy as np\n\nName = Union[int, str]\nValue = Union[int, float, bool, str, tuple]\n\n\n@dataclasses.dataclass(frozen=True)\nclass BasisDirection:\n \"\"\"Represents a basis direction (no magnitude) in a vector space.\n\n Attributes:\n name: a unique name for this direction.\n value: used to hold a value one-hot-encoded by this direction. e.g.,\n [BasisDirection(\"vs_1\", True), BasisDirection(\"vs_1\", False)] would be\n basis directions of a subspace called \"vs_1\" which one-hot-encodes the\n values True and False. 
If provided, considered part of the name for the\n purpose of disambiguating directions.\n \"\"\"\n name: Name\n value: Optional[Value] = None\n\n def __str__(self):\n if self.value is None:\n return str(self.name)\n return f\"{self.name}:{self.value}\"\n\n def __lt__(self, other: \"BasisDirection\") -> bool:\n try:\n return (self.name, self.value) < (other.name, other.value)\n except TypeError:\n return str(self) < str(other)\n\n\n@dataclasses.dataclass\nclass VectorInBasis:\n \"\"\"A vector (or array of vectors) in a given basis.\n\n When magnitudes are 1-d, this is a vector.\n When magnitudes are (n+1)-d, this is an array of vectors,\n where the -1th dimension is the basis dimension.\n \"\"\"\n basis_directions: Sequence[BasisDirection]\n magnitudes: np.ndarray\n\n def __post_init__(self):\n \"\"\"Sort basis directions.\"\"\"\n if len(self.basis_directions) != self.magnitudes.shape[-1]:\n raise ValueError(\n \"Last dimension of magnitudes must be the same as number \"\n f\"of basis directions. Was {len(self.basis_directions)} \"\n f\"and {self.magnitudes.shape[-1]}.\")\n\n sort_idx = np.argsort(self.basis_directions)\n self.basis_directions = [self.basis_directions[i] for i in sort_idx]\n self.magnitudes = np.take(self.magnitudes, sort_idx, -1)\n\n def __add__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":", "metadata": {"task_id": "deepmind--tracr/101", "ground_truth": " if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {self} + {other}\")\n magnitudes = self.magnitudes + other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "bases.py"], "context_start_lineno": 0, "lineno": 76, "function_name": "__add__"}, "groundtruth": " if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {self} + {other}\")\n magnitudes = self.magnitudes + other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Vectors and bases.\"\"\"\n\nimport dataclasses\nfrom typing import Sequence, Union, Optional, Iterable\n\nimport numpy as np\n\nName = Union[int, str]\nValue = Union[int, float, bool, str, tuple]\n\n\n@dataclasses.dataclass(frozen=True)\nclass BasisDirection:\n \"\"\"Represents a basis direction (no magnitude) in a vector space.\n\n Attributes:\n name: a unique name for this direction.\n value: used to hold a value one-hot-encoded by this direction. e.g.,\n [BasisDirection(\"vs_1\", True), BasisDirection(\"vs_1\", False)] would be\n basis directions of a subspace called \"vs_1\" which one-hot-encodes the\n values True and False. 
If provided, considered part of the name for the\n purpose of disambiguating directions.\n \"\"\"\n name: Name\n value: Optional[Value] = None\n\n def __str__(self):\n if self.value is None:\n return str(self.name)\n return f\"{self.name}:{self.value}\"\n\n def __lt__(self, other: \"BasisDirection\") -> bool:\n try:\n return (self.name, self.value) < (other.name, other.value)\n except TypeError:\n return str(self) < str(other)\n\n\n@dataclasses.dataclass\nclass VectorInBasis:\n \"\"\"A vector (or array of vectors) in a given basis.\n\n When magnitudes are 1-d, this is a vector.\n When magnitudes are (n+1)-d, this is an array of vectors,\n where the -1th dimension is the basis dimension.\n \"\"\"\n basis_directions: Sequence[BasisDirection]\n magnitudes: np.ndarray\n\n def __post_init__(self):\n \"\"\"Sort basis directions.\"\"\"\n if len(self.basis_directions) != self.magnitudes.shape[-1]:\n raise ValueError(\n \"Last dimension of magnitudes must be the same as number \"\n f\"of basis directions. Was {len(self.basis_directions)} \"\n f\"and {self.magnitudes.shape[-1]}.\")\n\n sort_idx = np.argsort(self.basis_directions)\n self.basis_directions = [self.basis_directions[i] for i in sort_idx]\n self.magnitudes = np.take(self.magnitudes, sort_idx, -1)\n\n def __add__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {self} + {other}\")\n magnitudes = self.magnitudes + other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __radd__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {other} + {self}\")\n return self + other\n\n def __sub__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":", "metadata": {"task_id": "deepmind--tracr/102", "ground_truth": " if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Subtracting incompatible bases: {self} - {other}\")\n magnitudes = self.magnitudes - other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "bases.py"], "context_start_lineno": 0, "lineno": 87, "function_name": "__sub__"}, "groundtruth": " if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Subtracting incompatible bases: {self} - {other}\")\n magnitudes = self.magnitudes - other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Vectors and bases.\"\"\"\n\nimport dataclasses\nfrom typing import Sequence, Union, Optional, Iterable\n\nimport numpy as np\n\nName = Union[int, str]\nValue = Union[int, float, bool, str, tuple]\n\n\n@dataclasses.dataclass(frozen=True)\nclass BasisDirection:\n \"\"\"Represents a basis direction (no magnitude) in a vector space.\n\n Attributes:\n name: a unique name for this direction.\n value: used to hold a value one-hot-encoded by this direction. e.g.,\n [BasisDirection(\"vs_1\", True), BasisDirection(\"vs_1\", False)] would be\n basis directions of a subspace called \"vs_1\" which one-hot-encodes the\n values True and False. If provided, considered part of the name for the\n purpose of disambiguating directions.\n \"\"\"\n name: Name\n value: Optional[Value] = None\n\n def __str__(self):\n if self.value is None:\n return str(self.name)\n return f\"{self.name}:{self.value}\"\n\n def __lt__(self, other: \"BasisDirection\") -> bool:\n try:\n return (self.name, self.value) < (other.name, other.value)\n except TypeError:\n return str(self) < str(other)\n\n\n@dataclasses.dataclass\nclass VectorInBasis:\n \"\"\"A vector (or array of vectors) in a given basis.\n\n When magnitudes are 1-d, this is a vector.\n When magnitudes are (n+1)-d, this is an array of vectors,\n where the -1th dimension is the basis dimension.\n \"\"\"\n basis_directions: Sequence[BasisDirection]\n magnitudes: np.ndarray\n\n def __post_init__(self):\n \"\"\"Sort basis directions.\"\"\"\n if len(self.basis_directions) != self.magnitudes.shape[-1]:\n raise ValueError(\n \"Last dimension of magnitudes must be the same as number \"\n f\"of basis directions. 
Was {len(self.basis_directions)} \"\n f\"and {self.magnitudes.shape[-1]}.\")\n\n sort_idx = np.argsort(self.basis_directions)\n self.basis_directions = [self.basis_directions[i] for i in sort_idx]\n self.magnitudes = np.take(self.magnitudes, sort_idx, -1)\n\n def __add__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {self} + {other}\")\n magnitudes = self.magnitudes + other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __radd__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {other} + {self}\")\n return self + other\n\n def __sub__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Subtracting incompatible bases: {self} - {other}\")\n magnitudes = self.magnitudes - other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __rsub__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Subtracting incompatible bases: {other} - {self}\")\n magnitudes = other.magnitudes - self.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __mul__(self, scalar: float) -> \"VectorInBasis\":\n return VectorInBasis(self.basis_directions, self.magnitudes * scalar)\n\n def __rmul__(self, scalar: float) -> \"VectorInBasis\":\n return self * scalar\n\n def __truediv__(self, scalar: float) -> \"VectorInBasis\":\n return VectorInBasis(self.basis_directions, self.magnitudes / scalar)\n\n def __neg__(self) -> \"VectorInBasis\":\n return (-1) * self\n\n def __eq__(self, other: \"VectorInBasis\") -> bool:", "metadata": {"task_id": "deepmind--tracr/103", "ground_truth": " return ((self.basis_directions == other.basis_directions) and\n (self.magnitudes.shape == other.magnitudes.shape) and\n (np.all(self.magnitudes == other.magnitudes)))\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "bases.py"], "context_start_lineno": 0, "lineno": 111, "function_name": "__eq__"}, "groundtruth": " return ((self.basis_directions == other.basis_directions) and\n (self.magnitudes.shape == other.magnitudes.shape) and\n (np.all(self.magnitudes == other.magnitudes)))\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Vectors and bases.\"\"\"\n\nimport dataclasses\nfrom typing import Sequence, Union, Optional, Iterable\n\nimport numpy as np\n\nName = Union[int, str]\nValue = Union[int, float, bool, str, tuple]\n\n\n@dataclasses.dataclass(frozen=True)\nclass BasisDirection:\n \"\"\"Represents a basis direction (no magnitude) in a vector space.\n\n Attributes:\n name: a unique name for this direction.\n value: used to hold a value one-hot-encoded by this direction. e.g.,\n [BasisDirection(\"vs_1\", True), BasisDirection(\"vs_1\", False)] would be\n basis directions of a subspace called \"vs_1\" which one-hot-encodes the\n values True and False. If provided, considered part of the name for the\n purpose of disambiguating directions.\n \"\"\"\n name: Name\n value: Optional[Value] = None\n\n def __str__(self):\n if self.value is None:\n return str(self.name)\n return f\"{self.name}:{self.value}\"\n\n def __lt__(self, other: \"BasisDirection\") -> bool:\n try:\n return (self.name, self.value) < (other.name, other.value)\n except TypeError:\n return str(self) < str(other)\n\n\n@dataclasses.dataclass\nclass VectorInBasis:\n \"\"\"A vector (or array of vectors) in a given basis.\n\n When magnitudes are 1-d, this is a vector.\n When magnitudes are (n+1)-d, this is an array of vectors,\n where the -1th dimension is the basis dimension.\n \"\"\"\n basis_directions: Sequence[BasisDirection]\n magnitudes: np.ndarray\n\n def __post_init__(self):\n \"\"\"Sort basis directions.\"\"\"\n if len(self.basis_directions) != self.magnitudes.shape[-1]:\n raise ValueError(\n \"Last dimension of magnitudes must be the same as number \"\n f\"of basis directions. 
Was {len(self.basis_directions)} \"\n f\"and {self.magnitudes.shape[-1]}.\")\n\n sort_idx = np.argsort(self.basis_directions)\n self.basis_directions = [self.basis_directions[i] for i in sort_idx]\n self.magnitudes = np.take(self.magnitudes, sort_idx, -1)\n\n def __add__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {self} + {other}\")\n magnitudes = self.magnitudes + other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __radd__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {other} + {self}\")\n return self + other\n\n def __sub__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Subtracting incompatible bases: {self} - {other}\")\n magnitudes = self.magnitudes - other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __rsub__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Subtracting incompatible bases: {other} - {self}\")\n magnitudes = other.magnitudes - self.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __mul__(self, scalar: float) -> \"VectorInBasis\":\n return VectorInBasis(self.basis_directions, self.magnitudes * scalar)\n\n def __rmul__(self, scalar: float) -> \"VectorInBasis\":\n return self * scalar\n\n def __truediv__(self, scalar: float) -> \"VectorInBasis\":\n return VectorInBasis(self.basis_directions, self.magnitudes / scalar)\n\n def __neg__(self) -> \"VectorInBasis\":\n return (-1) * self\n\n def __eq__(self, other: \"VectorInBasis\") -> bool:\n return ((self.basis_directions == other.basis_directions) and\n (self.magnitudes.shape == other.magnitudes.shape) and\n (np.all(self.magnitudes == other.magnitudes)))\n\n @classmethod\n def sum(cls, vectors: Sequence[\"VectorInBasis\"]) -> \"VectorInBasis\":\n return cls(vectors[0].basis_directions,\n np.sum([x.magnitudes for x in vectors], axis=0))\n\n @classmethod\n def stack(cls,\n vectors: Sequence[\"VectorInBasis\"],\n axis: int = 0) -> \"VectorInBasis\":", "metadata": {"task_id": "deepmind--tracr/104", "ground_truth": " for v in vectors[1:]:\n if v.basis_directions != vectors[0].basis_directions:\n raise TypeError(f\"Stacking incompatible bases: {vectors[0]} + {v}\")\n return cls(vectors[0].basis_directions,\n np.stack([v.magnitudes for v in vectors], axis=axis))\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "bases.py"], "context_start_lineno": 0, "lineno": 124, "function_name": "stack"}, "groundtruth": " for v in vectors[1:]:\n if v.basis_directions != vectors[0].basis_directions:\n raise TypeError(f\"Stacking incompatible bases: {vectors[0]} + {v}\")\n return cls(vectors[0].basis_directions,\n np.stack([v.magnitudes for v in vectors], axis=axis))\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Vectors and bases.\"\"\"\n\nimport dataclasses\nfrom typing import Sequence, Union, Optional, Iterable\n\nimport numpy as np\n\nName = Union[int, str]\nValue = Union[int, float, bool, str, tuple]\n\n\n@dataclasses.dataclass(frozen=True)\nclass BasisDirection:\n \"\"\"Represents a basis direction (no magnitude) in a vector space.\n\n Attributes:\n name: a unique name for this direction.\n value: used to hold a value one-hot-encoded by this direction. e.g.,\n [BasisDirection(\"vs_1\", True), BasisDirection(\"vs_1\", False)] would be\n basis directions of a subspace called \"vs_1\" which one-hot-encodes the\n values True and False. If provided, considered part of the name for the\n purpose of disambiguating directions.\n \"\"\"\n name: Name\n value: Optional[Value] = None\n\n def __str__(self):\n if self.value is None:\n return str(self.name)\n return f\"{self.name}:{self.value}\"\n\n def __lt__(self, other: \"BasisDirection\") -> bool:\n try:\n return (self.name, self.value) < (other.name, other.value)\n except TypeError:\n return str(self) < str(other)\n\n\n@dataclasses.dataclass\nclass VectorInBasis:\n \"\"\"A vector (or array of vectors) in a given basis.\n\n When magnitudes are 1-d, this is a vector.\n When magnitudes are (n+1)-d, this is an array of vectors,\n where the -1th dimension is the basis dimension.\n \"\"\"\n basis_directions: Sequence[BasisDirection]\n magnitudes: np.ndarray\n\n def __post_init__(self):\n \"\"\"Sort basis directions.\"\"\"\n if len(self.basis_directions) != self.magnitudes.shape[-1]:\n raise ValueError(\n \"Last dimension of magnitudes must be the same as number \"\n f\"of basis directions. 
Was {len(self.basis_directions)} \"\n f\"and {self.magnitudes.shape[-1]}.\")\n\n sort_idx = np.argsort(self.basis_directions)\n self.basis_directions = [self.basis_directions[i] for i in sort_idx]\n self.magnitudes = np.take(self.magnitudes, sort_idx, -1)\n\n def __add__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {self} + {other}\")\n magnitudes = self.magnitudes + other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __radd__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {other} + {self}\")\n return self + other\n\n def __sub__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Subtracting incompatible bases: {self} - {other}\")\n magnitudes = self.magnitudes - other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __rsub__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Subtracting incompatible bases: {other} - {self}\")\n magnitudes = other.magnitudes - self.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __mul__(self, scalar: float) -> \"VectorInBasis\":\n return VectorInBasis(self.basis_directions, self.magnitudes * scalar)\n\n def __rmul__(self, scalar: float) -> \"VectorInBasis\":\n return self * scalar\n\n def __truediv__(self, scalar: float) -> \"VectorInBasis\":\n return VectorInBasis(self.basis_directions, self.magnitudes / scalar)\n\n def __neg__(self) -> \"VectorInBasis\":\n return (-1) * self\n\n def __eq__(self, other: \"VectorInBasis\") -> bool:\n return ((self.basis_directions == other.basis_directions) and\n (self.magnitudes.shape == other.magnitudes.shape) and\n (np.all(self.magnitudes == other.magnitudes)))\n\n @classmethod\n def sum(cls, vectors: Sequence[\"VectorInBasis\"]) -> \"VectorInBasis\":\n return cls(vectors[0].basis_directions,\n np.sum([x.magnitudes for x in vectors], axis=0))\n\n @classmethod\n def stack(cls,\n vectors: Sequence[\"VectorInBasis\"],\n axis: int = 0) -> \"VectorInBasis\":\n for v in vectors[1:]:\n if v.basis_directions != vectors[0].basis_directions:\n raise TypeError(f\"Stacking incompatible bases: {vectors[0]} + {v}\")\n return cls(vectors[0].basis_directions,\n np.stack([v.magnitudes for v in vectors], axis=axis))\n\n def project(\n self, basis: Union[\"VectorSpaceWithBasis\", Sequence[BasisDirection]]\n ) -> \"VectorInBasis\":\n \"\"\"Projects to the basis.\"\"\"", "metadata": {"task_id": "deepmind--tracr/105", "ground_truth": " if isinstance(basis, VectorSpaceWithBasis):\n basis = basis.basis\n components = []\n for direction in basis:\n if direction in self.basis_directions:\n components.append(\n self.magnitudes[..., self.basis_directions.index(direction)])\n else:\n components.append(np.zeros_like(self.magnitudes[..., 0]))\n return VectorInBasis(list(basis), np.stack(components, axis=-1))\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "bases.py"], "context_start_lineno": 0, "lineno": 134, "function_name": "project"}, "groundtruth": " if isinstance(basis, VectorSpaceWithBasis):\n basis = basis.basis\n components = []\n for direction in basis:\n if direction in self.basis_directions:\n components.append(\n self.magnitudes[..., self.basis_directions.index(direction)])\n 
else:\n components.append(np.zeros_like(self.magnitudes[..., 0]))\n return VectorInBasis(list(basis), np.stack(components, axis=-1))\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Vectors and bases.\"\"\"\n\nimport dataclasses\nfrom typing import Sequence, Union, Optional, Iterable\n\nimport numpy as np\n\nName = Union[int, str]\nValue = Union[int, float, bool, str, tuple]\n\n\n@dataclasses.dataclass(frozen=True)\nclass BasisDirection:\n \"\"\"Represents a basis direction (no magnitude) in a vector space.\n\n Attributes:\n name: a unique name for this direction.\n value: used to hold a value one-hot-encoded by this direction. e.g.,\n [BasisDirection(\"vs_1\", True), BasisDirection(\"vs_1\", False)] would be\n basis directions of a subspace called \"vs_1\" which one-hot-encodes the\n values True and False. If provided, considered part of the name for the\n purpose of disambiguating directions.\n \"\"\"\n name: Name\n value: Optional[Value] = None\n\n def __str__(self):\n if self.value is None:\n return str(self.name)\n return f\"{self.name}:{self.value}\"\n\n def __lt__(self, other: \"BasisDirection\") -> bool:\n try:\n return (self.name, self.value) < (other.name, other.value)\n except TypeError:\n return str(self) < str(other)\n\n\n@dataclasses.dataclass\nclass VectorInBasis:\n \"\"\"A vector (or array of vectors) in a given basis.\n\n When magnitudes are 1-d, this is a vector.\n When magnitudes are (n+1)-d, this is an array of vectors,\n where the -1th dimension is the basis dimension.\n \"\"\"\n basis_directions: Sequence[BasisDirection]\n magnitudes: np.ndarray\n\n def __post_init__(self):\n \"\"\"Sort basis directions.\"\"\"\n if len(self.basis_directions) != self.magnitudes.shape[-1]:\n raise ValueError(\n \"Last dimension of magnitudes must be the same as number \"\n f\"of basis directions. 
Was {len(self.basis_directions)} \"\n f\"and {self.magnitudes.shape[-1]}.\")\n\n sort_idx = np.argsort(self.basis_directions)\n self.basis_directions = [self.basis_directions[i] for i in sort_idx]\n self.magnitudes = np.take(self.magnitudes, sort_idx, -1)\n\n def __add__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {self} + {other}\")\n magnitudes = self.magnitudes + other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __radd__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {other} + {self}\")\n return self + other\n\n def __sub__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Subtracting incompatible bases: {self} - {other}\")\n magnitudes = self.magnitudes - other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __rsub__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Subtracting incompatible bases: {other} - {self}\")\n magnitudes = other.magnitudes - self.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __mul__(self, scalar: float) -> \"VectorInBasis\":\n return VectorInBasis(self.basis_directions, self.magnitudes * scalar)\n\n def __rmul__(self, scalar: float) -> \"VectorInBasis\":\n return self * scalar\n\n def __truediv__(self, scalar: float) -> \"VectorInBasis\":\n return VectorInBasis(self.basis_directions, self.magnitudes / scalar)\n\n def __neg__(self) -> \"VectorInBasis\":\n return (-1) * self\n\n def __eq__(self, other: \"VectorInBasis\") -> bool:\n return ((self.basis_directions == other.basis_directions) and\n (self.magnitudes.shape == other.magnitudes.shape) and\n (np.all(self.magnitudes == other.magnitudes)))\n\n @classmethod\n def sum(cls, vectors: Sequence[\"VectorInBasis\"]) -> \"VectorInBasis\":\n return cls(vectors[0].basis_directions,\n np.sum([x.magnitudes for x in vectors], axis=0))\n\n @classmethod\n def stack(cls,\n vectors: Sequence[\"VectorInBasis\"],\n axis: int = 0) -> \"VectorInBasis\":\n for v in vectors[1:]:\n if v.basis_directions != vectors[0].basis_directions:\n raise TypeError(f\"Stacking incompatible bases: {vectors[0]} + {v}\")\n return cls(vectors[0].basis_directions,\n np.stack([v.magnitudes for v in vectors], axis=axis))\n\n def project(\n self, basis: Union[\"VectorSpaceWithBasis\", Sequence[BasisDirection]]\n ) -> \"VectorInBasis\":\n \"\"\"Projects to the basis.\"\"\"\n if isinstance(basis, VectorSpaceWithBasis):\n basis = basis.basis\n components = []\n for direction in basis:\n if direction in self.basis_directions:\n components.append(\n self.magnitudes[..., self.basis_directions.index(direction)])\n else:\n components.append(np.zeros_like(self.magnitudes[..., 0]))\n return VectorInBasis(list(basis), np.stack(components, axis=-1))\n\n\n@dataclasses.dataclass\nclass VectorSpaceWithBasis:\n \"\"\"A vector subspace in a given basis.\"\"\"\n basis: Sequence[BasisDirection]\n\n def __post_init__(self):\n \"\"\"Keep basis directions sorted.\"\"\"\n self.basis = sorted(self.basis)\n\n @property\n def num_dims(self) -> int:\n return len(self.basis)\n\n def __contains__(self, item: Union[VectorInBasis, BasisDirection]) -> bool:", "metadata": {"task_id": "deepmind--tracr/106", 
"ground_truth": " if isinstance(item, BasisDirection):\n return item in self.basis\n\n return set(self.basis) == set(item.basis_directions)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "bases.py"], "context_start_lineno": 0, "lineno": 160, "function_name": "__contains__"}, "groundtruth": " if isinstance(item, BasisDirection):\n return item in self.basis\n\n return set(self.basis) == set(item.basis_directions)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Vectors and bases.\"\"\"\n\nimport dataclasses\nfrom typing import Sequence, Union, Optional, Iterable\n\nimport numpy as np\n\nName = Union[int, str]\nValue = Union[int, float, bool, str, tuple]\n\n\n@dataclasses.dataclass(frozen=True)\nclass BasisDirection:\n \"\"\"Represents a basis direction (no magnitude) in a vector space.\n\n Attributes:\n name: a unique name for this direction.\n value: used to hold a value one-hot-encoded by this direction. e.g.,\n [BasisDirection(\"vs_1\", True), BasisDirection(\"vs_1\", False)] would be\n basis directions of a subspace called \"vs_1\" which one-hot-encodes the\n values True and False. If provided, considered part of the name for the\n purpose of disambiguating directions.\n \"\"\"\n name: Name\n value: Optional[Value] = None\n\n def __str__(self):\n if self.value is None:\n return str(self.name)\n return f\"{self.name}:{self.value}\"\n\n def __lt__(self, other: \"BasisDirection\") -> bool:\n try:\n return (self.name, self.value) < (other.name, other.value)\n except TypeError:\n return str(self) < str(other)\n\n\n@dataclasses.dataclass\nclass VectorInBasis:\n \"\"\"A vector (or array of vectors) in a given basis.\n\n When magnitudes are 1-d, this is a vector.\n When magnitudes are (n+1)-d, this is an array of vectors,\n where the -1th dimension is the basis dimension.\n \"\"\"\n basis_directions: Sequence[BasisDirection]\n magnitudes: np.ndarray\n\n def __post_init__(self):\n \"\"\"Sort basis directions.\"\"\"\n if len(self.basis_directions) != self.magnitudes.shape[-1]:\n raise ValueError(\n \"Last dimension of magnitudes must be the same as number \"\n f\"of basis directions. 
Was {len(self.basis_directions)} \"\n f\"and {self.magnitudes.shape[-1]}.\")\n\n sort_idx = np.argsort(self.basis_directions)\n self.basis_directions = [self.basis_directions[i] for i in sort_idx]\n self.magnitudes = np.take(self.magnitudes, sort_idx, -1)\n\n def __add__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {self} + {other}\")\n magnitudes = self.magnitudes + other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __radd__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {other} + {self}\")\n return self + other\n\n def __sub__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Subtracting incompatible bases: {self} - {other}\")\n magnitudes = self.magnitudes - other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __rsub__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Subtracting incompatible bases: {other} - {self}\")\n magnitudes = other.magnitudes - self.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __mul__(self, scalar: float) -> \"VectorInBasis\":\n return VectorInBasis(self.basis_directions, self.magnitudes * scalar)\n\n def __rmul__(self, scalar: float) -> \"VectorInBasis\":\n return self * scalar\n\n def __truediv__(self, scalar: float) -> \"VectorInBasis\":\n return VectorInBasis(self.basis_directions, self.magnitudes / scalar)\n\n def __neg__(self) -> \"VectorInBasis\":\n return (-1) * self\n\n def __eq__(self, other: \"VectorInBasis\") -> bool:\n return ((self.basis_directions == other.basis_directions) and\n (self.magnitudes.shape == other.magnitudes.shape) and\n (np.all(self.magnitudes == other.magnitudes)))\n\n @classmethod\n def sum(cls, vectors: Sequence[\"VectorInBasis\"]) -> \"VectorInBasis\":\n return cls(vectors[0].basis_directions,\n np.sum([x.magnitudes for x in vectors], axis=0))\n\n @classmethod\n def stack(cls,\n vectors: Sequence[\"VectorInBasis\"],\n axis: int = 0) -> \"VectorInBasis\":\n for v in vectors[1:]:\n if v.basis_directions != vectors[0].basis_directions:\n raise TypeError(f\"Stacking incompatible bases: {vectors[0]} + {v}\")\n return cls(vectors[0].basis_directions,\n np.stack([v.magnitudes for v in vectors], axis=axis))\n\n def project(\n self, basis: Union[\"VectorSpaceWithBasis\", Sequence[BasisDirection]]\n ) -> \"VectorInBasis\":\n \"\"\"Projects to the basis.\"\"\"\n if isinstance(basis, VectorSpaceWithBasis):\n basis = basis.basis\n components = []\n for direction in basis:\n if direction in self.basis_directions:\n components.append(\n self.magnitudes[..., self.basis_directions.index(direction)])\n else:\n components.append(np.zeros_like(self.magnitudes[..., 0]))\n return VectorInBasis(list(basis), np.stack(components, axis=-1))\n\n\n@dataclasses.dataclass\nclass VectorSpaceWithBasis:\n \"\"\"A vector subspace in a given basis.\"\"\"\n basis: Sequence[BasisDirection]\n\n def __post_init__(self):\n \"\"\"Keep basis directions sorted.\"\"\"\n self.basis = sorted(self.basis)\n\n @property\n def num_dims(self) -> int:\n return len(self.basis)\n\n def __contains__(self, item: Union[VectorInBasis, BasisDirection]) -> bool:\n if isinstance(item, BasisDirection):\n return item in 
self.basis\n\n return set(self.basis) == set(item.basis_directions)\n\n def issubspace(self, other: \"VectorSpaceWithBasis\") -> bool:\n return set(self.basis).issubset(set(other.basis))\n\n def basis_vectors(self) -> Sequence[VectorInBasis]:\n basis_vector_magnitudes = list(np.eye(self.num_dims))\n return [VectorInBasis(self.basis, m) for m in basis_vector_magnitudes]\n\n def vector_from_basis_direction(\n self, basis_direction: BasisDirection) -> VectorInBasis:\n i = self.basis.index(basis_direction)\n return VectorInBasis(self.basis, np.eye(self.num_dims)[i])\n\n def null_vector(self) -> VectorInBasis:\n return VectorInBasis(self.basis, np.zeros(self.num_dims))\n\n @classmethod\n def from_names(cls, names: Sequence[Name]) -> \"VectorSpaceWithBasis\":\n \"\"\"Creates a VectorSpace from a list of names for its basis directions.\"\"\"\n return cls([BasisDirection(n) for n in names])\n\n @classmethod\n def from_values(\n cls,\n name: Name,\n values: Iterable[Value],\n ) -> \"VectorSpaceWithBasis\":\n \"\"\"Creates a VectorSpace from a list of values for its basis directions.\"\"\"\n return cls([BasisDirection(name, v) for v in values])\n\n\ndef direct_sum(*vs: VectorSpaceWithBasis) -> VectorSpaceWithBasis:\n \"\"\"Create a direct sum of the vector spaces.\n\n Assumes the basis elements of all input vector spaces are\n orthogonal to each other. Maintains the order of the bases.\n\n Args:\n *vs: the vector spaces to sum.\n\n Returns:\n the combined vector space.\n\n Raises:\n Value error in case of overlapping bases.\n \"\"\"\n # Take the union of all the bases:", "metadata": {"task_id": "deepmind--tracr/107", "ground_truth": " total_basis = sum([v.basis for v in vs], [])\n\n if len(total_basis) != len(set(total_basis)):\n raise ValueError(\"Overlapping bases!\")\n\n return VectorSpaceWithBasis(total_basis)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "bases.py"], "context_start_lineno": 0, "lineno": 211, "function_name": "direct_sum"}, "groundtruth": " total_basis = sum([v.basis for v in vs], [])\n\n if len(total_basis) != len(set(total_basis)):\n raise ValueError(\"Overlapping bases!\")\n\n return VectorSpaceWithBasis(total_basis)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Vectors and bases.\"\"\"\n\nimport dataclasses\nfrom typing import Sequence, Union, Optional, Iterable\n\nimport numpy as np\n\nName = Union[int, str]\nValue = Union[int, float, bool, str, tuple]\n\n\n@dataclasses.dataclass(frozen=True)\nclass BasisDirection:\n \"\"\"Represents a basis direction (no magnitude) in a vector space.\n\n Attributes:\n name: a unique name for this direction.\n value: used to hold a value one-hot-encoded by this direction. e.g.,\n [BasisDirection(\"vs_1\", True), BasisDirection(\"vs_1\", False)] would be\n basis directions of a subspace called \"vs_1\" which one-hot-encodes the\n values True and False. 
If provided, considered part of the name for the\n purpose of disambiguating directions.\n \"\"\"\n name: Name\n value: Optional[Value] = None\n\n def __str__(self):\n if self.value is None:\n return str(self.name)\n return f\"{self.name}:{self.value}\"\n\n def __lt__(self, other: \"BasisDirection\") -> bool:\n try:\n return (self.name, self.value) < (other.name, other.value)\n except TypeError:\n return str(self) < str(other)\n\n\n@dataclasses.dataclass\nclass VectorInBasis:\n \"\"\"A vector (or array of vectors) in a given basis.\n\n When magnitudes are 1-d, this is a vector.\n When magnitudes are (n+1)-d, this is an array of vectors,\n where the -1th dimension is the basis dimension.\n \"\"\"\n basis_directions: Sequence[BasisDirection]\n magnitudes: np.ndarray\n\n def __post_init__(self):\n \"\"\"Sort basis directions.\"\"\"\n if len(self.basis_directions) != self.magnitudes.shape[-1]:\n raise ValueError(\n \"Last dimension of magnitudes must be the same as number \"\n f\"of basis directions. Was {len(self.basis_directions)} \"\n f\"and {self.magnitudes.shape[-1]}.\")\n\n sort_idx = np.argsort(self.basis_directions)\n self.basis_directions = [self.basis_directions[i] for i in sort_idx]\n self.magnitudes = np.take(self.magnitudes, sort_idx, -1)\n\n def __add__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {self} + {other}\")\n magnitudes = self.magnitudes + other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __radd__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Adding incompatible bases: {other} + {self}\")\n return self + other\n\n def __sub__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Subtracting incompatible bases: {self} - {other}\")\n magnitudes = self.magnitudes - other.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __rsub__(self, other: \"VectorInBasis\") -> \"VectorInBasis\":\n if self.basis_directions != other.basis_directions:\n raise TypeError(f\"Subtracting incompatible bases: {other} - {self}\")\n magnitudes = other.magnitudes - self.magnitudes\n return VectorInBasis(self.basis_directions, magnitudes)\n\n def __mul__(self, scalar: float) -> \"VectorInBasis\":\n return VectorInBasis(self.basis_directions, self.magnitudes * scalar)\n\n def __rmul__(self, scalar: float) -> \"VectorInBasis\":\n return self * scalar\n\n def __truediv__(self, scalar: float) -> \"VectorInBasis\":\n return VectorInBasis(self.basis_directions, self.magnitudes / scalar)\n\n def __neg__(self) -> \"VectorInBasis\":\n return (-1) * self\n\n def __eq__(self, other: \"VectorInBasis\") -> bool:\n return ((self.basis_directions == other.basis_directions) and\n (self.magnitudes.shape == other.magnitudes.shape) and\n (np.all(self.magnitudes == other.magnitudes)))\n\n @classmethod\n def sum(cls, vectors: Sequence[\"VectorInBasis\"]) -> \"VectorInBasis\":\n return cls(vectors[0].basis_directions,\n np.sum([x.magnitudes for x in vectors], axis=0))\n\n @classmethod\n def stack(cls,\n vectors: Sequence[\"VectorInBasis\"],\n axis: int = 0) -> \"VectorInBasis\":\n for v in vectors[1:]:\n if v.basis_directions != vectors[0].basis_directions:\n raise TypeError(f\"Stacking incompatible bases: {vectors[0]} + {v}\")\n return cls(vectors[0].basis_directions,\n np.stack([v.magnitudes for 
v in vectors], axis=axis))\n\n def project(\n self, basis: Union[\"VectorSpaceWithBasis\", Sequence[BasisDirection]]\n ) -> \"VectorInBasis\":\n \"\"\"Projects to the basis.\"\"\"\n if isinstance(basis, VectorSpaceWithBasis):\n basis = basis.basis\n components = []\n for direction in basis:\n if direction in self.basis_directions:\n components.append(\n self.magnitudes[..., self.basis_directions.index(direction)])\n else:\n components.append(np.zeros_like(self.magnitudes[..., 0]))\n return VectorInBasis(list(basis), np.stack(components, axis=-1))\n\n\n@dataclasses.dataclass\nclass VectorSpaceWithBasis:\n \"\"\"A vector subspace in a given basis.\"\"\"\n basis: Sequence[BasisDirection]\n\n def __post_init__(self):\n \"\"\"Keep basis directions sorted.\"\"\"\n self.basis = sorted(self.basis)\n\n @property\n def num_dims(self) -> int:\n return len(self.basis)\n\n def __contains__(self, item: Union[VectorInBasis, BasisDirection]) -> bool:\n if isinstance(item, BasisDirection):\n return item in self.basis\n\n return set(self.basis) == set(item.basis_directions)\n\n def issubspace(self, other: \"VectorSpaceWithBasis\") -> bool:\n return set(self.basis).issubset(set(other.basis))\n\n def basis_vectors(self) -> Sequence[VectorInBasis]:\n basis_vector_magnitudes = list(np.eye(self.num_dims))\n return [VectorInBasis(self.basis, m) for m in basis_vector_magnitudes]\n\n def vector_from_basis_direction(\n self, basis_direction: BasisDirection) -> VectorInBasis:\n i = self.basis.index(basis_direction)\n return VectorInBasis(self.basis, np.eye(self.num_dims)[i])\n\n def null_vector(self) -> VectorInBasis:\n return VectorInBasis(self.basis, np.zeros(self.num_dims))\n\n @classmethod\n def from_names(cls, names: Sequence[Name]) -> \"VectorSpaceWithBasis\":\n \"\"\"Creates a VectorSpace from a list of names for its basis directions.\"\"\"\n return cls([BasisDirection(n) for n in names])\n\n @classmethod\n def from_values(\n cls,\n name: Name,\n values: Iterable[Value],\n ) -> \"VectorSpaceWithBasis\":\n \"\"\"Creates a VectorSpace from a list of values for its basis directions.\"\"\"\n return cls([BasisDirection(name, v) for v in values])\n\n\ndef direct_sum(*vs: VectorSpaceWithBasis) -> VectorSpaceWithBasis:\n \"\"\"Create a direct sum of the vector spaces.\n\n Assumes the basis elements of all input vector spaces are\n orthogonal to each other. Maintains the order of the bases.\n\n Args:\n *vs: the vector spaces to sum.\n\n Returns:\n the combined vector space.\n\n Raises:\n Value error in case of overlapping bases.\n \"\"\"\n # Take the union of all the bases:\n total_basis = sum([v.basis for v in vs], [])\n\n if len(total_basis) != len(set(total_basis)):\n raise ValueError(\"Overlapping bases!\")\n\n return VectorSpaceWithBasis(total_basis)\n\n\ndef join_vector_spaces(*vs: VectorSpaceWithBasis) -> VectorSpaceWithBasis:\n \"\"\"Joins a set of vector spaces allowing them to overlap.\n\n Assumes the basis elements of all input vector spaces are\n orthogonal to each other. 
Does not maintain the order of the bases but\n sorts them.\n\n Args:\n *vs: the vector spaces to sum.\n\n Returns:\n the combined vector space.\n \"\"\"\n # Take the union of all the bases:", "metadata": {"task_id": "deepmind--tracr/108", "ground_truth": " total_basis = list(set().union(*[set(v.basis) for v in vs]))\n total_basis = sorted(total_basis)\n return VectorSpaceWithBasis(total_basis)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "bases.py"], "context_start_lineno": 0, "lineno": 233, "function_name": "join_vector_spaces"}, "groundtruth": " total_basis = list(set().union(*[set(v.basis) for v in vs]))\n total_basis = sorted(total_basis)\n return VectorSpaceWithBasis(total_basis)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Pieces for making transformers.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Iterable, List, Optional, Sequence, Union\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import vectorspace_fns\n\nproject = vectorspace_fns.project\n\n\ndef _np_softmax(x, axis=-1):\n x_max = np.max(x, axis=axis, keepdims=True)\n return np.exp(x - x_max) / np.sum(np.exp(x - x_max), axis=axis, keepdims=True)\n\n\ndef _np_relu(x):\n return np.where(x > 0, x, 0)\n\n\ndef relu(x: bases.VectorInBasis) -> bases.VectorInBasis:\n return bases.VectorInBasis(x.basis_directions, _np_relu(x.magnitudes))\n\n\nclass Block(abc.ABC):\n \"\"\"Transformer block, acting on a sequence of vector space elements.\n\n Attributes:\n residual_space: Vector space that contains all subspaces the Block interacts\n with. 
This can be either the full residual space of a model or a subspace.\n \"\"\"\n residual_space: bases.VectorSpaceWithBasis\n\n @abc.abstractmethod\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Applies self to an input.\"\"\"\n\n\n@dataclasses.dataclass\nclass AttentionHead(Block):\n \"\"\"A transformer attention head.\"\"\"\n w_qk: vectorspace_fns.ScalarBilinear\n w_ov: vectorspace_fns.Linear\n residual_space: Optional[bases.VectorSpaceWithBasis] = None\n causal: bool = False\n\n def __post_init__(self):\n \"\"\"Infer residual stream and typecheck subspaces.\"\"\"", "metadata": {"task_id": "deepmind--tracr/109", "ground_truth": " if self.residual_space is None:\n self.residual_space = bases.join_vector_spaces(self.w_qk.left_space,\n self.w_qk.right_space,\n self.w_ov.input_space,\n self.w_ov.output_space)\n\n assert self.w_qk.left_space.issubspace(self.residual_space)\n assert self.w_qk.right_space.issubspace(self.residual_space)\n assert self.w_ov.input_space.issubspace(self.residual_space)\n assert self.w_ov.output_space.issubspace(self.residual_space)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "transformers.py"], "context_start_lineno": 0, "lineno": 65, "function_name": "__post_init__"}, "groundtruth": " if self.residual_space is None:\n self.residual_space = bases.join_vector_spaces(self.w_qk.left_space,\n self.w_qk.right_space,\n self.w_ov.input_space,\n self.w_ov.output_space)\n\n assert self.w_qk.left_space.issubspace(self.residual_space)\n assert self.w_qk.right_space.issubspace(self.residual_space)\n assert self.w_ov.input_space.issubspace(self.residual_space)\n assert self.w_ov.output_space.issubspace(self.residual_space)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Pieces for making transformers.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Iterable, List, Optional, Sequence, Union\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import vectorspace_fns\n\nproject = vectorspace_fns.project\n\n\ndef _np_softmax(x, axis=-1):\n x_max = np.max(x, axis=axis, keepdims=True)\n return np.exp(x - x_max) / np.sum(np.exp(x - x_max), axis=axis, keepdims=True)\n\n\ndef _np_relu(x):\n return np.where(x > 0, x, 0)\n\n\ndef relu(x: bases.VectorInBasis) -> bases.VectorInBasis:\n return bases.VectorInBasis(x.basis_directions, _np_relu(x.magnitudes))\n\n\nclass Block(abc.ABC):\n \"\"\"Transformer block, acting on a sequence of vector space elements.\n\n Attributes:\n residual_space: Vector space that contains all subspaces the Block interacts\n with. 
This can be either the full residual space of a model or a subspace.\n \"\"\"\n residual_space: bases.VectorSpaceWithBasis\n\n @abc.abstractmethod\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Applies self to an input.\"\"\"\n\n\n@dataclasses.dataclass\nclass AttentionHead(Block):\n \"\"\"A transformer attention head.\"\"\"\n w_qk: vectorspace_fns.ScalarBilinear\n w_ov: vectorspace_fns.Linear\n residual_space: Optional[bases.VectorSpaceWithBasis] = None\n causal: bool = False\n\n def __post_init__(self):\n \"\"\"Infer residual stream and typecheck subspaces.\"\"\"\n if self.residual_space is None:\n self.residual_space = bases.join_vector_spaces(self.w_qk.left_space,\n self.w_qk.right_space,\n self.w_ov.input_space,\n self.w_ov.output_space)\n\n assert self.w_qk.left_space.issubspace(self.residual_space)\n assert self.w_qk.right_space.issubspace(self.residual_space)\n assert self.w_ov.input_space.issubspace(self.residual_space)\n assert self.w_ov.output_space.issubspace(self.residual_space)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:", "metadata": {"task_id": "deepmind--tracr/110", "ground_truth": " assert x in self.residual_space\n # seq_len x query_space\n queries = x.project(self.w_qk.left_space)\n # seq_len x key_space\n keys = x.project(self.w_qk.right_space)\n\n attn_matrix = queries.magnitudes @ self.w_qk.matrix @ keys.magnitudes.T\n\n if self.causal:\n # The 1 gives us the matrix above the diagonal.\n mask = np.triu(np.full_like(attn_matrix, -np.inf), 1)\n attn_matrix = attn_matrix + mask\n\n attn_weights = _np_softmax(attn_matrix) # seq_len_from, seq_len_to\n values = self.w_ov_residual(x).magnitudes # seq_len_to, d_model\n\n magnitudes = attn_weights @ values # seq_len_from, d_model\n return bases.VectorInBasis(sorted(self.residual_space.basis), magnitudes)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "transformers.py"], "context_start_lineno": 0, "lineno": 77, "function_name": "apply"}, "groundtruth": " assert x in self.residual_space\n # seq_len x query_space\n queries = x.project(self.w_qk.left_space)\n # seq_len x key_space\n keys = x.project(self.w_qk.right_space)\n\n attn_matrix = queries.magnitudes @ self.w_qk.matrix @ keys.magnitudes.T\n\n if self.causal:\n # The 1 gives us the matrix above the diagonal.\n mask = np.triu(np.full_like(attn_matrix, -np.inf), 1)\n attn_matrix = attn_matrix + mask\n\n attn_weights = _np_softmax(attn_matrix) # seq_len_from, seq_len_to\n values = self.w_ov_residual(x).magnitudes # seq_len_to, d_model\n\n magnitudes = attn_weights @ values # seq_len_from, d_model\n return bases.VectorInBasis(sorted(self.residual_space.basis), magnitudes)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Pieces for making transformers.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Iterable, List, Optional, Sequence, Union\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import vectorspace_fns\n\nproject = vectorspace_fns.project\n\n\ndef _np_softmax(x, axis=-1):\n x_max = np.max(x, axis=axis, keepdims=True)\n return np.exp(x - x_max) / np.sum(np.exp(x - x_max), axis=axis, keepdims=True)\n\n\ndef _np_relu(x):\n return np.where(x > 0, x, 0)\n\n\ndef relu(x: bases.VectorInBasis) -> bases.VectorInBasis:\n return bases.VectorInBasis(x.basis_directions, _np_relu(x.magnitudes))\n\n\nclass Block(abc.ABC):\n \"\"\"Transformer block, acting on a sequence of vector space elements.\n\n Attributes:\n residual_space: Vector space that contains all subspaces the Block interacts\n with. This can be either the full residual space of a model or a subspace.\n \"\"\"\n residual_space: bases.VectorSpaceWithBasis\n\n @abc.abstractmethod\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Applies self to an input.\"\"\"\n\n\n@dataclasses.dataclass\nclass AttentionHead(Block):\n \"\"\"A transformer attention head.\"\"\"\n w_qk: vectorspace_fns.ScalarBilinear\n w_ov: vectorspace_fns.Linear\n residual_space: Optional[bases.VectorSpaceWithBasis] = None\n causal: bool = False\n\n def __post_init__(self):\n \"\"\"Infer residual stream and typecheck subspaces.\"\"\"\n if self.residual_space is None:\n self.residual_space = bases.join_vector_spaces(self.w_qk.left_space,\n self.w_qk.right_space,\n self.w_ov.input_space,\n self.w_ov.output_space)\n\n assert self.w_qk.left_space.issubspace(self.residual_space)\n assert self.w_qk.right_space.issubspace(self.residual_space)\n assert self.w_ov.input_space.issubspace(self.residual_space)\n assert self.w_ov.output_space.issubspace(self.residual_space)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n assert x in self.residual_space\n # seq_len x query_space\n queries = x.project(self.w_qk.left_space)\n # seq_len x key_space\n keys = x.project(self.w_qk.right_space)\n\n attn_matrix = queries.magnitudes @ self.w_qk.matrix @ keys.magnitudes.T\n\n if self.causal:\n # The 1 gives us the matrix above the diagonal.\n mask = np.triu(np.full_like(attn_matrix, -np.inf), 1)\n attn_matrix = attn_matrix + mask\n\n attn_weights = _np_softmax(attn_matrix) # seq_len_from, seq_len_to\n values = self.w_ov_residual(x).magnitudes # seq_len_to, d_model\n\n magnitudes = attn_weights @ values # seq_len_from, d_model\n return bases.VectorInBasis(sorted(self.residual_space.basis), magnitudes)\n\n def w_ov_residual(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Wov but acting on the residual space.\"\"\"", "metadata": {"task_id": "deepmind--tracr/111", "ground_truth": " x = project(self.residual_space, self.w_ov.input_space)(x)\n out = self.w_ov(x)\n 
return project(self.w_ov.output_space, self.residual_space)(out)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "transformers.py"], "context_start_lineno": 0, "lineno": 98, "function_name": "w_ov_residual"}, "groundtruth": " x = project(self.residual_space, self.w_ov.input_space)(x)\n out = self.w_ov(x)\n return project(self.w_ov.output_space, self.residual_space)(out)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Pieces for making transformers.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Iterable, List, Optional, Sequence, Union\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import vectorspace_fns\n\nproject = vectorspace_fns.project\n\n\ndef _np_softmax(x, axis=-1):\n x_max = np.max(x, axis=axis, keepdims=True)\n return np.exp(x - x_max) / np.sum(np.exp(x - x_max), axis=axis, keepdims=True)\n\n\ndef _np_relu(x):\n return np.where(x > 0, x, 0)\n\n\ndef relu(x: bases.VectorInBasis) -> bases.VectorInBasis:\n return bases.VectorInBasis(x.basis_directions, _np_relu(x.magnitudes))\n\n\nclass Block(abc.ABC):\n \"\"\"Transformer block, acting on a sequence of vector space elements.\n\n Attributes:\n residual_space: Vector space that contains all subspaces the Block interacts\n with. 
This can be either the full residual space of a model or a subspace.\n \"\"\"\n residual_space: bases.VectorSpaceWithBasis\n\n @abc.abstractmethod\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Applies self to an input.\"\"\"\n\n\n@dataclasses.dataclass\nclass AttentionHead(Block):\n \"\"\"A transformer attention head.\"\"\"\n w_qk: vectorspace_fns.ScalarBilinear\n w_ov: vectorspace_fns.Linear\n residual_space: Optional[bases.VectorSpaceWithBasis] = None\n causal: bool = False\n\n def __post_init__(self):\n \"\"\"Infer residual stream and typecheck subspaces.\"\"\"\n if self.residual_space is None:\n self.residual_space = bases.join_vector_spaces(self.w_qk.left_space,\n self.w_qk.right_space,\n self.w_ov.input_space,\n self.w_ov.output_space)\n\n assert self.w_qk.left_space.issubspace(self.residual_space)\n assert self.w_qk.right_space.issubspace(self.residual_space)\n assert self.w_ov.input_space.issubspace(self.residual_space)\n assert self.w_ov.output_space.issubspace(self.residual_space)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n assert x in self.residual_space\n # seq_len x query_space\n queries = x.project(self.w_qk.left_space)\n # seq_len x key_space\n keys = x.project(self.w_qk.right_space)\n\n attn_matrix = queries.magnitudes @ self.w_qk.matrix @ keys.magnitudes.T\n\n if self.causal:\n # The 1 gives us the matrix above the diagonal.\n mask = np.triu(np.full_like(attn_matrix, -np.inf), 1)\n attn_matrix = attn_matrix + mask\n\n attn_weights = _np_softmax(attn_matrix) # seq_len_from, seq_len_to\n values = self.w_ov_residual(x).magnitudes # seq_len_to, d_model\n\n magnitudes = attn_weights @ values # seq_len_from, d_model\n return bases.VectorInBasis(sorted(self.residual_space.basis), magnitudes)\n\n def w_ov_residual(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Wov but acting on the residual space.\"\"\"\n x = project(self.residual_space, self.w_ov.input_space)(x)\n out = self.w_ov(x)\n return project(self.w_ov.output_space, self.residual_space)(out)\n\n @property\n def num_heads(self) -> int:\n return 1\n\n def as_multi(self) -> \"MultiAttentionHead\":\n return MultiAttentionHead([self])\n\n\n@dataclasses.dataclass\nclass MultiAttentionHead(Block):\n \"\"\"Applies attention heads in parallel.\"\"\"\n sub_blocks: List[Union[AttentionHead, \"MultiAttentionHead\"]]\n\n def __post_init__(self):", "metadata": {"task_id": "deepmind--tracr/112", "ground_truth": " spaces = [block.residual_space for block in self.sub_blocks]\n self.residual_space, *others = spaces\n assert all(s == self.residual_space for s in others)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "transformers.py"], "context_start_lineno": 0, "lineno": 116, "function_name": "__post_init__"}, "groundtruth": " spaces = [block.residual_space for block in self.sub_blocks]\n self.residual_space, *others = spaces\n assert all(s == self.residual_space for s in others)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Pieces for making transformers.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Iterable, List, Optional, Sequence, Union\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import vectorspace_fns\n\nproject = vectorspace_fns.project\n\n\ndef _np_softmax(x, axis=-1):\n x_max = np.max(x, axis=axis, keepdims=True)\n return np.exp(x - x_max) / np.sum(np.exp(x - x_max), axis=axis, keepdims=True)\n\n\ndef _np_relu(x):\n return np.where(x > 0, x, 0)\n\n\ndef relu(x: bases.VectorInBasis) -> bases.VectorInBasis:\n return bases.VectorInBasis(x.basis_directions, _np_relu(x.magnitudes))\n\n\nclass Block(abc.ABC):\n \"\"\"Transformer block, acting on a sequence of vector space elements.\n\n Attributes:\n residual_space: Vector space that contains all subspaces the Block interacts\n with. This can be either the full residual space of a model or a subspace.\n \"\"\"\n residual_space: bases.VectorSpaceWithBasis\n\n @abc.abstractmethod\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Applies self to an input.\"\"\"\n\n\n@dataclasses.dataclass\nclass AttentionHead(Block):\n \"\"\"A transformer attention head.\"\"\"\n w_qk: vectorspace_fns.ScalarBilinear\n w_ov: vectorspace_fns.Linear\n residual_space: Optional[bases.VectorSpaceWithBasis] = None\n causal: bool = False\n\n def __post_init__(self):\n \"\"\"Infer residual stream and typecheck subspaces.\"\"\"\n if self.residual_space is None:\n self.residual_space = bases.join_vector_spaces(self.w_qk.left_space,\n self.w_qk.right_space,\n self.w_ov.input_space,\n self.w_ov.output_space)\n\n assert self.w_qk.left_space.issubspace(self.residual_space)\n assert self.w_qk.right_space.issubspace(self.residual_space)\n assert self.w_ov.input_space.issubspace(self.residual_space)\n assert self.w_ov.output_space.issubspace(self.residual_space)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n assert x in self.residual_space\n # seq_len x query_space\n queries = x.project(self.w_qk.left_space)\n # seq_len x key_space\n keys = x.project(self.w_qk.right_space)\n\n attn_matrix = queries.magnitudes @ self.w_qk.matrix @ keys.magnitudes.T\n\n if self.causal:\n # The 1 gives us the matrix above the diagonal.\n mask = np.triu(np.full_like(attn_matrix, -np.inf), 1)\n attn_matrix = attn_matrix + mask\n\n attn_weights = _np_softmax(attn_matrix) # seq_len_from, seq_len_to\n values = self.w_ov_residual(x).magnitudes # seq_len_to, d_model\n\n magnitudes = attn_weights @ values # seq_len_from, d_model\n return bases.VectorInBasis(sorted(self.residual_space.basis), magnitudes)\n\n def w_ov_residual(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Wov but acting on the residual space.\"\"\"\n x = project(self.residual_space, self.w_ov.input_space)(x)\n out = self.w_ov(x)\n return project(self.w_ov.output_space, self.residual_space)(out)\n\n 
@property\n def num_heads(self) -> int:\n return 1\n\n def as_multi(self) -> \"MultiAttentionHead\":\n return MultiAttentionHead([self])\n\n\n@dataclasses.dataclass\nclass MultiAttentionHead(Block):\n \"\"\"Applies attention heads in parallel.\"\"\"\n sub_blocks: List[Union[AttentionHead, \"MultiAttentionHead\"]]\n\n def __post_init__(self):\n spaces = [block.residual_space for block in self.sub_blocks]\n self.residual_space, *others = spaces\n assert all(s == self.residual_space for s in others)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n # each element is seq_len x embedding\n outs = [block.apply(x) for block in self.sub_blocks]\n return bases.VectorInBasis.sum(outs) # seq_len x embedding\n\n @property\n def num_heads(self) -> int:\n return sum(sub_block.num_heads for sub_block in self.sub_blocks)\n\n def heads(self) -> Iterable[AttentionHead]:", "metadata": {"task_id": "deepmind--tracr/113", "ground_truth": " for sub_block in self.sub_blocks:\n if isinstance(sub_block, AttentionHead):\n yield sub_block\n elif isinstance(sub_block, MultiAttentionHead):\n yield from sub_block.heads()\n else:\n raise NotImplementedError()\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "transformers.py"], "context_start_lineno": 0, "lineno": 130, "function_name": "heads"}, "groundtruth": " for sub_block in self.sub_blocks:\n if isinstance(sub_block, AttentionHead):\n yield sub_block\n elif isinstance(sub_block, MultiAttentionHead):\n yield from sub_block.heads()\n else:\n raise NotImplementedError()\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Pieces for making transformers.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Iterable, List, Optional, Sequence, Union\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import vectorspace_fns\n\nproject = vectorspace_fns.project\n\n\ndef _np_softmax(x, axis=-1):\n x_max = np.max(x, axis=axis, keepdims=True)\n return np.exp(x - x_max) / np.sum(np.exp(x - x_max), axis=axis, keepdims=True)\n\n\ndef _np_relu(x):\n return np.where(x > 0, x, 0)\n\n\ndef relu(x: bases.VectorInBasis) -> bases.VectorInBasis:\n return bases.VectorInBasis(x.basis_directions, _np_relu(x.magnitudes))\n\n\nclass Block(abc.ABC):\n \"\"\"Transformer block, acting on a sequence of vector space elements.\n\n Attributes:\n residual_space: Vector space that contains all subspaces the Block interacts\n with. 
This can be either the full residual space of a model or a subspace.\n \"\"\"\n residual_space: bases.VectorSpaceWithBasis\n\n @abc.abstractmethod\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Applies self to an input.\"\"\"\n\n\n@dataclasses.dataclass\nclass AttentionHead(Block):\n \"\"\"A transformer attention head.\"\"\"\n w_qk: vectorspace_fns.ScalarBilinear\n w_ov: vectorspace_fns.Linear\n residual_space: Optional[bases.VectorSpaceWithBasis] = None\n causal: bool = False\n\n def __post_init__(self):\n \"\"\"Infer residual stream and typecheck subspaces.\"\"\"\n if self.residual_space is None:\n self.residual_space = bases.join_vector_spaces(self.w_qk.left_space,\n self.w_qk.right_space,\n self.w_ov.input_space,\n self.w_ov.output_space)\n\n assert self.w_qk.left_space.issubspace(self.residual_space)\n assert self.w_qk.right_space.issubspace(self.residual_space)\n assert self.w_ov.input_space.issubspace(self.residual_space)\n assert self.w_ov.output_space.issubspace(self.residual_space)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n assert x in self.residual_space\n # seq_len x query_space\n queries = x.project(self.w_qk.left_space)\n # seq_len x key_space\n keys = x.project(self.w_qk.right_space)\n\n attn_matrix = queries.magnitudes @ self.w_qk.matrix @ keys.magnitudes.T\n\n if self.causal:\n # The 1 gives us the matrix above the diagonal.\n mask = np.triu(np.full_like(attn_matrix, -np.inf), 1)\n attn_matrix = attn_matrix + mask\n\n attn_weights = _np_softmax(attn_matrix) # seq_len_from, seq_len_to\n values = self.w_ov_residual(x).magnitudes # seq_len_to, d_model\n\n magnitudes = attn_weights @ values # seq_len_from, d_model\n return bases.VectorInBasis(sorted(self.residual_space.basis), magnitudes)\n\n def w_ov_residual(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Wov but acting on the residual space.\"\"\"\n x = project(self.residual_space, self.w_ov.input_space)(x)\n out = self.w_ov(x)\n return project(self.w_ov.output_space, self.residual_space)(out)\n\n @property\n def num_heads(self) -> int:\n return 1\n\n def as_multi(self) -> \"MultiAttentionHead\":\n return MultiAttentionHead([self])\n\n\n@dataclasses.dataclass\nclass MultiAttentionHead(Block):\n \"\"\"Applies attention heads in parallel.\"\"\"\n sub_blocks: List[Union[AttentionHead, \"MultiAttentionHead\"]]\n\n def __post_init__(self):\n spaces = [block.residual_space for block in self.sub_blocks]\n self.residual_space, *others = spaces\n assert all(s == self.residual_space for s in others)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n # each element is seq_len x embedding\n outs = [block.apply(x) for block in self.sub_blocks]\n return bases.VectorInBasis.sum(outs) # seq_len x embedding\n\n @property\n def num_heads(self) -> int:\n return sum(sub_block.num_heads for sub_block in self.sub_blocks)\n\n def heads(self) -> Iterable[AttentionHead]:\n for sub_block in self.sub_blocks:\n if isinstance(sub_block, AttentionHead):\n yield sub_block\n elif isinstance(sub_block, MultiAttentionHead):\n yield from sub_block.heads()\n else:\n raise NotImplementedError()\n\n def as_multi(self) -> \"MultiAttentionHead\":\n return self\n\n\n@dataclasses.dataclass\nclass MLP(Block):\n \"\"\"A transformer MLP block.\"\"\"\n fst: vectorspace_fns.Linear\n snd: vectorspace_fns.Linear\n residual_space: Optional[bases.VectorSpaceWithBasis] = None\n\n def __post_init__(self):\n \"\"\"Typecheck subspaces.\"\"\"", "metadata": {"task_id": "deepmind--tracr/114", 
"ground_truth": " if self.residual_space is None:\n self.residual_space = bases.join_vector_spaces(self.fst.input_space,\n self.snd.output_space)\n\n assert self.fst.output_space == self.snd.input_space\n assert self.fst.input_space.issubspace(self.residual_space)\n assert self.snd.output_space.issubspace(self.residual_space)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "transformers.py"], "context_start_lineno": 0, "lineno": 151, "function_name": "__post_init__"}, "groundtruth": " if self.residual_space is None:\n self.residual_space = bases.join_vector_spaces(self.fst.input_space,\n self.snd.output_space)\n\n assert self.fst.output_space == self.snd.input_space\n assert self.fst.input_space.issubspace(self.residual_space)\n assert self.snd.output_space.issubspace(self.residual_space)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Pieces for making transformers.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Iterable, List, Optional, Sequence, Union\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import vectorspace_fns\n\nproject = vectorspace_fns.project\n\n\ndef _np_softmax(x, axis=-1):\n x_max = np.max(x, axis=axis, keepdims=True)\n return np.exp(x - x_max) / np.sum(np.exp(x - x_max), axis=axis, keepdims=True)\n\n\ndef _np_relu(x):\n return np.where(x > 0, x, 0)\n\n\ndef relu(x: bases.VectorInBasis) -> bases.VectorInBasis:\n return bases.VectorInBasis(x.basis_directions, _np_relu(x.magnitudes))\n\n\nclass Block(abc.ABC):\n \"\"\"Transformer block, acting on a sequence of vector space elements.\n\n Attributes:\n residual_space: Vector space that contains all subspaces the Block interacts\n with. 
This can be either the full residual space of a model or a subspace.\n \"\"\"\n residual_space: bases.VectorSpaceWithBasis\n\n @abc.abstractmethod\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Applies self to an input.\"\"\"\n\n\n@dataclasses.dataclass\nclass AttentionHead(Block):\n \"\"\"A transformer attention head.\"\"\"\n w_qk: vectorspace_fns.ScalarBilinear\n w_ov: vectorspace_fns.Linear\n residual_space: Optional[bases.VectorSpaceWithBasis] = None\n causal: bool = False\n\n def __post_init__(self):\n \"\"\"Infer residual stream and typecheck subspaces.\"\"\"\n if self.residual_space is None:\n self.residual_space = bases.join_vector_spaces(self.w_qk.left_space,\n self.w_qk.right_space,\n self.w_ov.input_space,\n self.w_ov.output_space)\n\n assert self.w_qk.left_space.issubspace(self.residual_space)\n assert self.w_qk.right_space.issubspace(self.residual_space)\n assert self.w_ov.input_space.issubspace(self.residual_space)\n assert self.w_ov.output_space.issubspace(self.residual_space)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n assert x in self.residual_space\n # seq_len x query_space\n queries = x.project(self.w_qk.left_space)\n # seq_len x key_space\n keys = x.project(self.w_qk.right_space)\n\n attn_matrix = queries.magnitudes @ self.w_qk.matrix @ keys.magnitudes.T\n\n if self.causal:\n # The 1 gives us the matrix above the diagonal.\n mask = np.triu(np.full_like(attn_matrix, -np.inf), 1)\n attn_matrix = attn_matrix + mask\n\n attn_weights = _np_softmax(attn_matrix) # seq_len_from, seq_len_to\n values = self.w_ov_residual(x).magnitudes # seq_len_to, d_model\n\n magnitudes = attn_weights @ values # seq_len_from, d_model\n return bases.VectorInBasis(sorted(self.residual_space.basis), magnitudes)\n\n def w_ov_residual(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Wov but acting on the residual space.\"\"\"\n x = project(self.residual_space, self.w_ov.input_space)(x)\n out = self.w_ov(x)\n return project(self.w_ov.output_space, self.residual_space)(out)\n\n @property\n def num_heads(self) -> int:\n return 1\n\n def as_multi(self) -> \"MultiAttentionHead\":\n return MultiAttentionHead([self])\n\n\n@dataclasses.dataclass\nclass MultiAttentionHead(Block):\n \"\"\"Applies attention heads in parallel.\"\"\"\n sub_blocks: List[Union[AttentionHead, \"MultiAttentionHead\"]]\n\n def __post_init__(self):\n spaces = [block.residual_space for block in self.sub_blocks]\n self.residual_space, *others = spaces\n assert all(s == self.residual_space for s in others)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n # each element is seq_len x embedding\n outs = [block.apply(x) for block in self.sub_blocks]\n return bases.VectorInBasis.sum(outs) # seq_len x embedding\n\n @property\n def num_heads(self) -> int:\n return sum(sub_block.num_heads for sub_block in self.sub_blocks)\n\n def heads(self) -> Iterable[AttentionHead]:\n for sub_block in self.sub_blocks:\n if isinstance(sub_block, AttentionHead):\n yield sub_block\n elif isinstance(sub_block, MultiAttentionHead):\n yield from sub_block.heads()\n else:\n raise NotImplementedError()\n\n def as_multi(self) -> \"MultiAttentionHead\":\n return self\n\n\n@dataclasses.dataclass\nclass MLP(Block):\n \"\"\"A transformer MLP block.\"\"\"\n fst: vectorspace_fns.Linear\n snd: vectorspace_fns.Linear\n residual_space: Optional[bases.VectorSpaceWithBasis] = None\n\n def __post_init__(self):\n \"\"\"Typecheck subspaces.\"\"\"\n if self.residual_space is None:\n 
self.residual_space = bases.join_vector_spaces(self.fst.input_space,\n self.snd.output_space)\n\n assert self.fst.output_space == self.snd.input_space\n assert self.fst.input_space.issubspace(self.residual_space)\n assert self.snd.output_space.issubspace(self.residual_space)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:", "metadata": {"task_id": "deepmind--tracr/115", "ground_truth": " assert x in self.residual_space\n\n x = project(self.residual_space, self.fst.input_space)(x)\n hidden = self.fst(x)\n hidden = relu(hidden)\n out = self.snd(hidden)\n return project(self.snd.output_space, self.residual_space)(out)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "transformers.py"], "context_start_lineno": 0, "lineno": 160, "function_name": "apply"}, "groundtruth": " assert x in self.residual_space\n\n x = project(self.residual_space, self.fst.input_space)(x)\n hidden = self.fst(x)\n hidden = relu(hidden)\n out = self.snd(hidden)\n return project(self.snd.output_space, self.residual_space)(out)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Pieces for making transformers.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Iterable, List, Optional, Sequence, Union\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import vectorspace_fns\n\nproject = vectorspace_fns.project\n\n\ndef _np_softmax(x, axis=-1):\n x_max = np.max(x, axis=axis, keepdims=True)\n return np.exp(x - x_max) / np.sum(np.exp(x - x_max), axis=axis, keepdims=True)\n\n\ndef _np_relu(x):\n return np.where(x > 0, x, 0)\n\n\ndef relu(x: bases.VectorInBasis) -> bases.VectorInBasis:\n return bases.VectorInBasis(x.basis_directions, _np_relu(x.magnitudes))\n\n\nclass Block(abc.ABC):\n \"\"\"Transformer block, acting on a sequence of vector space elements.\n\n Attributes:\n residual_space: Vector space that contains all subspaces the Block interacts\n with. 
This can be either the full residual space of a model or a subspace.\n \"\"\"\n residual_space: bases.VectorSpaceWithBasis\n\n @abc.abstractmethod\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Applies self to an input.\"\"\"\n\n\n@dataclasses.dataclass\nclass AttentionHead(Block):\n \"\"\"A transformer attention head.\"\"\"\n w_qk: vectorspace_fns.ScalarBilinear\n w_ov: vectorspace_fns.Linear\n residual_space: Optional[bases.VectorSpaceWithBasis] = None\n causal: bool = False\n\n def __post_init__(self):\n \"\"\"Infer residual stream and typecheck subspaces.\"\"\"\n if self.residual_space is None:\n self.residual_space = bases.join_vector_spaces(self.w_qk.left_space,\n self.w_qk.right_space,\n self.w_ov.input_space,\n self.w_ov.output_space)\n\n assert self.w_qk.left_space.issubspace(self.residual_space)\n assert self.w_qk.right_space.issubspace(self.residual_space)\n assert self.w_ov.input_space.issubspace(self.residual_space)\n assert self.w_ov.output_space.issubspace(self.residual_space)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n assert x in self.residual_space\n # seq_len x query_space\n queries = x.project(self.w_qk.left_space)\n # seq_len x key_space\n keys = x.project(self.w_qk.right_space)\n\n attn_matrix = queries.magnitudes @ self.w_qk.matrix @ keys.magnitudes.T\n\n if self.causal:\n # The 1 gives us the matrix above the diagonal.\n mask = np.triu(np.full_like(attn_matrix, -np.inf), 1)\n attn_matrix = attn_matrix + mask\n\n attn_weights = _np_softmax(attn_matrix) # seq_len_from, seq_len_to\n values = self.w_ov_residual(x).magnitudes # seq_len_to, d_model\n\n magnitudes = attn_weights @ values # seq_len_from, d_model\n return bases.VectorInBasis(sorted(self.residual_space.basis), magnitudes)\n\n def w_ov_residual(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Wov but acting on the residual space.\"\"\"\n x = project(self.residual_space, self.w_ov.input_space)(x)\n out = self.w_ov(x)\n return project(self.w_ov.output_space, self.residual_space)(out)\n\n @property\n def num_heads(self) -> int:\n return 1\n\n def as_multi(self) -> \"MultiAttentionHead\":\n return MultiAttentionHead([self])\n\n\n@dataclasses.dataclass\nclass MultiAttentionHead(Block):\n \"\"\"Applies attention heads in parallel.\"\"\"\n sub_blocks: List[Union[AttentionHead, \"MultiAttentionHead\"]]\n\n def __post_init__(self):\n spaces = [block.residual_space for block in self.sub_blocks]\n self.residual_space, *others = spaces\n assert all(s == self.residual_space for s in others)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n # each element is seq_len x embedding\n outs = [block.apply(x) for block in self.sub_blocks]\n return bases.VectorInBasis.sum(outs) # seq_len x embedding\n\n @property\n def num_heads(self) -> int:\n return sum(sub_block.num_heads for sub_block in self.sub_blocks)\n\n def heads(self) -> Iterable[AttentionHead]:\n for sub_block in self.sub_blocks:\n if isinstance(sub_block, AttentionHead):\n yield sub_block\n elif isinstance(sub_block, MultiAttentionHead):\n yield from sub_block.heads()\n else:\n raise NotImplementedError()\n\n def as_multi(self) -> \"MultiAttentionHead\":\n return self\n\n\n@dataclasses.dataclass\nclass MLP(Block):\n \"\"\"A transformer MLP block.\"\"\"\n fst: vectorspace_fns.Linear\n snd: vectorspace_fns.Linear\n residual_space: Optional[bases.VectorSpaceWithBasis] = None\n\n def __post_init__(self):\n \"\"\"Typecheck subspaces.\"\"\"\n if self.residual_space is None:\n 
self.residual_space = bases.join_vector_spaces(self.fst.input_space,\n self.snd.output_space)\n\n assert self.fst.output_space == self.snd.input_space\n assert self.fst.input_space.issubspace(self.residual_space)\n assert self.snd.output_space.issubspace(self.residual_space)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n assert x in self.residual_space\n\n x = project(self.residual_space, self.fst.input_space)(x)\n hidden = self.fst(x)\n hidden = relu(hidden)\n out = self.snd(hidden)\n return project(self.snd.output_space, self.residual_space)(out)\n\n @classmethod\n def combine_in_parallel(cls, mlps: Sequence[\"MLP\"]) -> \"MLP\":", "metadata": {"task_id": "deepmind--tracr/116", "ground_truth": " fst = vectorspace_fns.Linear.combine_in_parallel(\n [block.fst for block in mlps])\n snd = vectorspace_fns.Linear.combine_in_parallel(\n [block.snd for block in mlps])\n return cls(fst=fst, snd=snd, residual_space=None)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "transformers.py"], "context_start_lineno": 0, "lineno": 170, "function_name": "combine_in_parallel"}, "groundtruth": " fst = vectorspace_fns.Linear.combine_in_parallel(\n [block.fst for block in mlps])\n snd = vectorspace_fns.Linear.combine_in_parallel(\n [block.snd for block in mlps])\n return cls(fst=fst, snd=snd, residual_space=None)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Pieces for making transformers.\"\"\"\n\nimport abc\nimport dataclasses\nfrom typing import Iterable, List, Optional, Sequence, Union\n\nimport numpy as np\n\nfrom tracr.craft import bases\nfrom tracr.craft import vectorspace_fns\n\nproject = vectorspace_fns.project\n\n\ndef _np_softmax(x, axis=-1):\n x_max = np.max(x, axis=axis, keepdims=True)\n return np.exp(x - x_max) / np.sum(np.exp(x - x_max), axis=axis, keepdims=True)\n\n\ndef _np_relu(x):\n return np.where(x > 0, x, 0)\n\n\ndef relu(x: bases.VectorInBasis) -> bases.VectorInBasis:\n return bases.VectorInBasis(x.basis_directions, _np_relu(x.magnitudes))\n\n\nclass Block(abc.ABC):\n \"\"\"Transformer block, acting on a sequence of vector space elements.\n\n Attributes:\n residual_space: Vector space that contains all subspaces the Block interacts\n with. 
This can be either the full residual space of a model or a subspace.\n \"\"\"\n residual_space: bases.VectorSpaceWithBasis\n\n @abc.abstractmethod\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Applies self to an input.\"\"\"\n\n\n@dataclasses.dataclass\nclass AttentionHead(Block):\n \"\"\"A transformer attention head.\"\"\"\n w_qk: vectorspace_fns.ScalarBilinear\n w_ov: vectorspace_fns.Linear\n residual_space: Optional[bases.VectorSpaceWithBasis] = None\n causal: bool = False\n\n def __post_init__(self):\n \"\"\"Infer residual stream and typecheck subspaces.\"\"\"\n if self.residual_space is None:\n self.residual_space = bases.join_vector_spaces(self.w_qk.left_space,\n self.w_qk.right_space,\n self.w_ov.input_space,\n self.w_ov.output_space)\n\n assert self.w_qk.left_space.issubspace(self.residual_space)\n assert self.w_qk.right_space.issubspace(self.residual_space)\n assert self.w_ov.input_space.issubspace(self.residual_space)\n assert self.w_ov.output_space.issubspace(self.residual_space)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n assert x in self.residual_space\n # seq_len x query_space\n queries = x.project(self.w_qk.left_space)\n # seq_len x key_space\n keys = x.project(self.w_qk.right_space)\n\n attn_matrix = queries.magnitudes @ self.w_qk.matrix @ keys.magnitudes.T\n\n if self.causal:\n # The 1 gives us the matrix above the diagonal.\n mask = np.triu(np.full_like(attn_matrix, -np.inf), 1)\n attn_matrix = attn_matrix + mask\n\n attn_weights = _np_softmax(attn_matrix) # seq_len_from, seq_len_to\n values = self.w_ov_residual(x).magnitudes # seq_len_to, d_model\n\n magnitudes = attn_weights @ values # seq_len_from, d_model\n return bases.VectorInBasis(sorted(self.residual_space.basis), magnitudes)\n\n def w_ov_residual(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n \"\"\"Wov but acting on the residual space.\"\"\"\n x = project(self.residual_space, self.w_ov.input_space)(x)\n out = self.w_ov(x)\n return project(self.w_ov.output_space, self.residual_space)(out)\n\n @property\n def num_heads(self) -> int:\n return 1\n\n def as_multi(self) -> \"MultiAttentionHead\":\n return MultiAttentionHead([self])\n\n\n@dataclasses.dataclass\nclass MultiAttentionHead(Block):\n \"\"\"Applies attention heads in parallel.\"\"\"\n sub_blocks: List[Union[AttentionHead, \"MultiAttentionHead\"]]\n\n def __post_init__(self):\n spaces = [block.residual_space for block in self.sub_blocks]\n self.residual_space, *others = spaces\n assert all(s == self.residual_space for s in others)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n # each element is seq_len x embedding\n outs = [block.apply(x) for block in self.sub_blocks]\n return bases.VectorInBasis.sum(outs) # seq_len x embedding\n\n @property\n def num_heads(self) -> int:\n return sum(sub_block.num_heads for sub_block in self.sub_blocks)\n\n def heads(self) -> Iterable[AttentionHead]:\n for sub_block in self.sub_blocks:\n if isinstance(sub_block, AttentionHead):\n yield sub_block\n elif isinstance(sub_block, MultiAttentionHead):\n yield from sub_block.heads()\n else:\n raise NotImplementedError()\n\n def as_multi(self) -> \"MultiAttentionHead\":\n return self\n\n\n@dataclasses.dataclass\nclass MLP(Block):\n \"\"\"A transformer MLP block.\"\"\"\n fst: vectorspace_fns.Linear\n snd: vectorspace_fns.Linear\n residual_space: Optional[bases.VectorSpaceWithBasis] = None\n\n def __post_init__(self):\n \"\"\"Typecheck subspaces.\"\"\"\n if self.residual_space is None:\n 
self.residual_space = bases.join_vector_spaces(self.fst.input_space,\n self.snd.output_space)\n\n assert self.fst.output_space == self.snd.input_space\n assert self.fst.input_space.issubspace(self.residual_space)\n assert self.snd.output_space.issubspace(self.residual_space)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:\n assert x in self.residual_space\n\n x = project(self.residual_space, self.fst.input_space)(x)\n hidden = self.fst(x)\n hidden = relu(hidden)\n out = self.snd(hidden)\n return project(self.snd.output_space, self.residual_space)(out)\n\n @classmethod\n def combine_in_parallel(cls, mlps: Sequence[\"MLP\"]) -> \"MLP\":\n fst = vectorspace_fns.Linear.combine_in_parallel(\n [block.fst for block in mlps])\n snd = vectorspace_fns.Linear.combine_in_parallel(\n [block.snd for block in mlps])\n return cls(fst=fst, snd=snd, residual_space=None)\n\n\n# Block that fits into a half-layer, without residual connections.\nHalfLayerBlock = Union[MLP, AttentionHead, MultiAttentionHead]\n\n\n@dataclasses.dataclass\nclass SeriesWithResiduals(Block):\n \"\"\"A series of blocks with residual connections.\"\"\"\n blocks: List[HalfLayerBlock]\n\n def __post_init__(self):\n spaces = [block.residual_space for block in self.blocks]\n self.residual_space = bases.join_vector_spaces(*spaces)\n\n def apply(self, x: bases.VectorInBasis) -> bases.VectorInBasis:", "metadata": {"task_id": "deepmind--tracr/117", "ground_truth": " x = x.project(self.residual_space)\n for block in self.blocks:\n x_in = x.project(block.residual_space)\n x_out = block.apply(x_in).project(self.residual_space)\n x = x + x_out\n return x\n", "fpath_tuple": ["deepmind_tracr", "tracr", "craft", "transformers.py"], "context_start_lineno": 0, "lineno": 191, "function_name": "apply"}, "groundtruth": " x = x.project(self.residual_space)\n for block in self.blocks:\n x_in = x.project(block.residual_space)\n x_out = block.apply(x_in).project(self.residual_space)\n x = x + x_out\n return x\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Helpers for handling errors in user-provided functions.\"\"\"\n\nimport functools\nimport logging\nfrom typing import Any, Callable\n\n\ndef ignoring_arithmetic_errors(fun: Callable[..., Any]) -> Callable[..., Any]:\n \"\"\"Makes fun return None instead of raising ArithmeticError.\"\"\"\n\n @functools.wraps(fun)", "metadata": {"task_id": "deepmind--tracr/118", "ground_truth": " def fun_wrapped(*args):\n try:\n return fun(*args)\n except ArithmeticError:\n logging.warning(\n \"Encountered arithmetic error in function: for value %s. 
\"\n \"Assuming this input will never occur.\", str(args))\n return None\n\n return fun_wrapped\n", "fpath_tuple": ["deepmind_tracr", "tracr", "utils", "errors.py"], "context_start_lineno": 0, "lineno": 25, "function_name": "ignoring_arithmetic_errors"}, "groundtruth": " def fun_wrapped(*args):\n try:\n return fun(*args)\n except ArithmeticError:\n logging.warning(\n \"Encountered arithmetic error in function: for value %s. \"\n \"Assuming this input will never occur.\", str(args))\n return None\n\n return fun_wrapped\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Helpers for handling errors in user-provided functions.\"\"\"\n\nimport functools\nimport logging\nfrom typing import Any, Callable\n\n\ndef ignoring_arithmetic_errors(fun: Callable[..., Any]) -> Callable[..., Any]:\n \"\"\"Makes fun return None instead of raising ArithmeticError.\"\"\"\n\n @functools.wraps(fun)\n def fun_wrapped(*args):", "metadata": {"task_id": "deepmind--tracr/119", "ground_truth": " try:\n return fun(*args)\n except ArithmeticError:\n logging.warning(\n \"Encountered arithmetic error in function: for value %s. \"\n \"Assuming this input will never occur.\", str(args))\n return None\n", "fpath_tuple": ["deepmind_tracr", "tracr", "utils", "errors.py"], "context_start_lineno": 0, "lineno": 26, "function_name": "fun_wrapped"}, "groundtruth": " try:\n return fun(*args)\n except ArithmeticError:\n logging.warning(\n \"Encountered arithmetic error in function: for value %s. \"\n \"Assuming this input will never occur.\", str(args))\n return None\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import compressed_model\nfrom tracr.transformer import model\n\n\nclass CompressedTransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"compressed_transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"compressed_transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"compressed_transformer/layer_{i}/layer_norm\"", "metadata": {"task_id": "deepmind--tracr/120", "ground_truth": " for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"compressed_transformer\")\n if len(levels) == 1:\n self.assertEqual(list(params[key].keys()), [\"w_emb\"])\n continue\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "compressed_model_test.py"], "context_start_lineno": 0, "lineno": 33, "function_name": "_check_layer_naming"}, "groundtruth": " for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"compressed_transformer\")\n if len(levels) == 1:\n self.assertEqual(list(params[key].keys()), [\"w_emb\"])\n continue\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import compressed_model\nfrom tracr.transformer import model\n\n\nclass CompressedTransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"compressed_transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"compressed_transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"compressed_transformer/layer_{i}/layer_norm\"\n for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"compressed_transformer\")\n if len(levels) == 1:\n self.assertEqual(list(params[key].keys()), [\"w_emb\"])\n continue\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n\n def _zero_mlps(self, params):", "metadata": {"task_id": "deepmind--tracr/121", "ground_truth": " for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "compressed_model_test.py"], "context_start_lineno": 0, "lineno": 50, "function_name": "_zero_mlps"}, "groundtruth": " for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import compressed_model\nfrom tracr.transformer import model\n\n\nclass CompressedTransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"compressed_transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"compressed_transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"compressed_transformer/layer_{i}/layer_norm\"\n for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"compressed_transformer\")\n if len(levels) == 1:\n self.assertEqual(list(params[key].keys()), [\"w_emb\"])\n continue\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n\n def _zero_mlps(self, params):\n for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n\n @parameterized.parameters(dict(layer_norm=True), dict(layer_norm=False))\n def test_layer_norm(self, layer_norm):\n # input = [1, 1, 1, 1]\n # If layer norm is used, this should give all-0 output for a freshly\n # initialized model because LN will subtract the mean after each layer.\n # Else we expect non-zero outputs.\n\n @hk.transform\n def forward(emb, mask):", "metadata": {"task_id": "deepmind--tracr/122", "ground_truth": " transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=layer_norm))\n return transformer(emb, mask).output\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "compressed_model_test.py"], "context_start_lineno": 0, "lineno": 65, "function_name": "forward"}, "groundtruth": " transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=layer_norm))\n return transformer(emb, mask).output\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import compressed_model\nfrom tracr.transformer import model\n\n\nclass CompressedTransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"compressed_transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"compressed_transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"compressed_transformer/layer_{i}/layer_norm\"\n for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"compressed_transformer\")\n if len(levels) == 1:\n self.assertEqual(list(params[key].keys()), [\"w_emb\"])\n continue\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n\n def _zero_mlps(self, params):\n for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n\n @parameterized.parameters(dict(layer_norm=True), dict(layer_norm=False))\n def test_layer_norm(self, layer_norm):\n # input = [1, 1, 1, 1]\n # If layer norm is used, this should give all-0 output for a freshly\n # initialized model because LN will subtract the mean after each layer.\n # Else we expect non-zero outputs.\n\n @hk.transform\n def forward(emb, mask):\n transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=layer_norm))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = jnp.ones((1, seq_len, 1))\n mask = jnp.ones((1, seq_len))\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if layer_norm:\n np.testing.assert_allclose(out, 0)\n else:\n self.assertFalse(np.allclose(out, 0))\n\n @parameterized.parameters(dict(causal=True), dict(causal=False))\n def test_causal_attention(self, causal):\n # input = [0, random, random, random]\n # mask = [1, 0, 1, 1]\n # For causal attention the second token can only attend to the first one, so\n # it should be the same. 
For non-causal attention all tokens should change.\n\n @hk.transform\n def forward(emb, mask):", "metadata": {"task_id": "deepmind--tracr/123", "ground_truth": " transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=False,\n causal=causal))\n return transformer(emb, mask).output\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "compressed_model_test.py"], "context_start_lineno": 0, "lineno": 97, "function_name": "forward"}, "groundtruth": " transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=False,\n causal=causal))\n return transformer(emb, mask).output\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import compressed_model\nfrom tracr.transformer import model\n\n\nclass CompressedTransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"compressed_transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"compressed_transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"compressed_transformer/layer_{i}/layer_norm\"\n for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"compressed_transformer\")\n if len(levels) == 1:\n self.assertEqual(list(params[key].keys()), [\"w_emb\"])\n continue\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n\n def _zero_mlps(self, params):\n for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n\n @parameterized.parameters(dict(layer_norm=True), dict(layer_norm=False))\n def test_layer_norm(self, layer_norm):\n # input = [1, 1, 1, 1]\n # If layer norm is used, this should give all-0 output for a freshly\n # initialized model because LN will subtract the mean after each layer.\n # Else we expect non-zero outputs.\n\n @hk.transform\n def forward(emb, mask):\n transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=layer_norm))\n return transformer(emb, mask).output\n\n 
seq_len = 4\n emb = jnp.ones((1, seq_len, 1))\n mask = jnp.ones((1, seq_len))\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if layer_norm:\n np.testing.assert_allclose(out, 0)\n else:\n self.assertFalse(np.allclose(out, 0))\n\n @parameterized.parameters(dict(causal=True), dict(causal=False))\n def test_causal_attention(self, causal):\n # input = [0, random, random, random]\n # mask = [1, 0, 1, 1]\n # For causal attention the second token can only attend to the first one, so\n # it should be the same. For non-causal attention all tokens should change.\n\n @hk.transform\n def forward(emb, mask):\n transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=False,\n causal=causal))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n emb[:, 0, :] = 0\n mask = np.array([[1, 0, 1, 1]])\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n params = self._zero_mlps(params)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if causal:\n self.assertEqual(0, out[0, 0, 0])\n self.assertEqual(emb[0, 1, 0], out[0, 1, 0])\n else:\n self.assertNotEqual(0, out[0, 0, 0])\n self.assertNotEqual(emb[0, 1, 0], out[0, 1, 0])\n self.assertNotEqual(emb[0, 2, 0], out[0, 2, 0])\n self.assertNotEqual(emb[0, 3, 0], out[0, 3, 0])\n\n def test_setting_activation_function_to_zero(self):\n # An activation function that always returns zeros should result in the\n # same model output as setting all MLP weights to zero.\n\n @hk.transform\n def forward_zero(emb, mask):", "metadata": {"task_id": "deepmind--tracr/124", "ground_truth": " transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jnp.zeros_like))\n return transformer(emb, mask).output\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "compressed_model_test.py"], "context_start_lineno": 0, "lineno": 135, "function_name": "forward_zero"}, "groundtruth": " transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jnp.zeros_like))\n return transformer(emb, mask).output\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import compressed_model\nfrom tracr.transformer import model\n\n\nclass CompressedTransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"compressed_transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"compressed_transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"compressed_transformer/layer_{i}/layer_norm\"\n for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"compressed_transformer\")\n if len(levels) == 1:\n self.assertEqual(list(params[key].keys()), [\"w_emb\"])\n continue\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n\n def _zero_mlps(self, params):\n for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n\n @parameterized.parameters(dict(layer_norm=True), dict(layer_norm=False))\n def test_layer_norm(self, layer_norm):\n # input = [1, 1, 1, 1]\n # If layer norm is used, this should give all-0 output for a freshly\n # initialized model because LN will subtract the mean after each layer.\n # Else we expect non-zero outputs.\n\n @hk.transform\n def forward(emb, mask):\n transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=layer_norm))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = jnp.ones((1, seq_len, 1))\n mask = jnp.ones((1, seq_len))\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if layer_norm:\n np.testing.assert_allclose(out, 0)\n else:\n self.assertFalse(np.allclose(out, 0))\n\n @parameterized.parameters(dict(causal=True), dict(causal=False))\n def test_causal_attention(self, causal):\n # input = [0, random, random, random]\n # mask = [1, 0, 1, 1]\n # For causal attention the second token can only attend to the first one, so\n # it should be the same. 
For non-causal attention all tokens should change.\n\n @hk.transform\n def forward(emb, mask):\n transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=False,\n causal=causal))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n emb[:, 0, :] = 0\n mask = np.array([[1, 0, 1, 1]])\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n params = self._zero_mlps(params)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if causal:\n self.assertEqual(0, out[0, 0, 0])\n self.assertEqual(emb[0, 1, 0], out[0, 1, 0])\n else:\n self.assertNotEqual(0, out[0, 0, 0])\n self.assertNotEqual(emb[0, 1, 0], out[0, 1, 0])\n self.assertNotEqual(emb[0, 2, 0], out[0, 2, 0])\n self.assertNotEqual(emb[0, 3, 0], out[0, 3, 0])\n\n def test_setting_activation_function_to_zero(self):\n # An activation function that always returns zeros should result in the\n # same model output as setting all MLP weights to zero.\n\n @hk.transform\n def forward_zero(emb, mask):\n transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jnp.zeros_like))\n return transformer(emb, mask).output\n\n @hk.transform\n def forward(emb, mask):", "metadata": {"task_id": "deepmind--tracr/125", "ground_truth": " transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jax.nn.gelu))\n return transformer(emb, mask).output\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "compressed_model_test.py"], "context_start_lineno": 0, "lineno": 149, "function_name": "forward"}, "groundtruth": " transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jax.nn.gelu))\n return transformer(emb, mask).output\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import compressed_model\nfrom tracr.transformer import model\n\n\nclass CompressedTransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"compressed_transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"compressed_transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"compressed_transformer/layer_{i}/layer_norm\"\n for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"compressed_transformer\")\n if len(levels) == 1:\n self.assertEqual(list(params[key].keys()), [\"w_emb\"])\n continue\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n\n def _zero_mlps(self, params):\n for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n\n @parameterized.parameters(dict(layer_norm=True), dict(layer_norm=False))\n def test_layer_norm(self, layer_norm):\n # input = [1, 1, 1, 1]\n # If layer norm is used, this should give all-0 output for a freshly\n # initialized model because LN will subtract the mean after each layer.\n # Else we expect non-zero outputs.\n\n @hk.transform\n def forward(emb, mask):\n transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=layer_norm))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = jnp.ones((1, seq_len, 1))\n mask = jnp.ones((1, seq_len))\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if layer_norm:\n np.testing.assert_allclose(out, 0)\n else:\n self.assertFalse(np.allclose(out, 0))\n\n @parameterized.parameters(dict(causal=True), dict(causal=False))\n def test_causal_attention(self, causal):\n # input = [0, random, random, random]\n # mask = [1, 0, 1, 1]\n # For causal attention the second token can only attend to the first one, so\n # it should be the same. 
For non-causal attention all tokens should change.\n\n @hk.transform\n def forward(emb, mask):\n transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=False,\n causal=causal))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n emb[:, 0, :] = 0\n mask = np.array([[1, 0, 1, 1]])\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n params = self._zero_mlps(params)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if causal:\n self.assertEqual(0, out[0, 0, 0])\n self.assertEqual(emb[0, 1, 0], out[0, 1, 0])\n else:\n self.assertNotEqual(0, out[0, 0, 0])\n self.assertNotEqual(emb[0, 1, 0], out[0, 1, 0])\n self.assertNotEqual(emb[0, 2, 0], out[0, 2, 0])\n self.assertNotEqual(emb[0, 3, 0], out[0, 3, 0])\n\n def test_setting_activation_function_to_zero(self):\n # An activation function that always returns zeros should result in the\n # same model output as setting all MLP weights to zero.\n\n @hk.transform\n def forward_zero(emb, mask):\n transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jnp.zeros_like))\n return transformer(emb, mask).output\n\n @hk.transform\n def forward(emb, mask):\n transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jax.nn.gelu))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n mask = np.ones((1, seq_len))\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n params_no_mlps = self._zero_mlps(params)\n\n out_zero_activation = forward_zero.apply(params, next(rng), emb, mask)\n out_no_mlps = forward.apply(params_no_mlps, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n np.testing.assert_allclose(out_zero_activation, out_no_mlps)\n self.assertFalse(np.allclose(out_zero_activation, 0))\n\n def test_not_setting_embedding_size_produces_same_output_as_default_model(\n self):\n config = model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False)\n\n @hk.without_apply_rng\n @hk.transform\n def forward_model(emb, mask):\n return model.Transformer(config)(emb, mask).output\n\n @hk.without_apply_rng\n @hk.transform\n def forward_superposition(emb, mask):\n return compressed_model.CompressedTransformer(config)(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n mask = np.ones((1, seq_len))\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward_model.init(next(rng), emb, mask)\n params_superposition = {\n k.replace(\"transformer\", \"compressed_transformer\"): v\n for k, v in params.items()\n }\n\n out_model = forward_model.apply(params, emb, mask)\n out_superposition = forward_superposition.apply(params_superposition, emb,\n mask)\n\n self._check_layer_naming(params_superposition)\n np.testing.assert_allclose(out_model, out_superposition)\n\n @parameterized.parameters(\n 
dict(embedding_size=2, unembed_at_every_layer=True),\n dict(embedding_size=2, unembed_at_every_layer=False),\n dict(embedding_size=6, unembed_at_every_layer=True),\n dict(embedding_size=6, unembed_at_every_layer=False))\n def test_embbeding_size_produces_correct_shape_of_residuals_and_layer_outputs(\n self, embedding_size, unembed_at_every_layer):\n\n @hk.transform\n def forward(emb, mask):", "metadata": {"task_id": "deepmind--tracr/126", "ground_truth": " transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False))\n return transformer(\n emb,\n mask,\n embedding_size=embedding_size,\n unembed_at_every_layer=unembed_at_every_layer,\n )\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "compressed_model_test.py"], "context_start_lineno": 0, "lineno": 227, "function_name": "forward"}, "groundtruth": " transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False))\n return transformer(\n emb,\n mask,\n embedding_size=embedding_size,\n unembed_at_every_layer=unembed_at_every_layer,\n )\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import compressed_model\nfrom tracr.transformer import model\n\n\nclass CompressedTransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"compressed_transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"compressed_transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"compressed_transformer/layer_{i}/layer_norm\"\n for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"compressed_transformer\")\n if len(levels) == 1:\n self.assertEqual(list(params[key].keys()), [\"w_emb\"])\n continue\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n\n def _zero_mlps(self, params):\n for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n\n @parameterized.parameters(dict(layer_norm=True), dict(layer_norm=False))\n def test_layer_norm(self, layer_norm):\n # input = [1, 1, 1, 1]\n # 
If layer norm is used, this should give all-0 output for a freshly\n # initialized model because LN will subtract the mean after each layer.\n # Else we expect non-zero outputs.\n\n @hk.transform\n def forward(emb, mask):\n transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=layer_norm))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = jnp.ones((1, seq_len, 1))\n mask = jnp.ones((1, seq_len))\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if layer_norm:\n np.testing.assert_allclose(out, 0)\n else:\n self.assertFalse(np.allclose(out, 0))\n\n @parameterized.parameters(dict(causal=True), dict(causal=False))\n def test_causal_attention(self, causal):\n # input = [0, random, random, random]\n # mask = [1, 0, 1, 1]\n # For causal attention the second token can only attend to the first one, so\n # it should be the same. For non-causal attention all tokens should change.\n\n @hk.transform\n def forward(emb, mask):\n transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=False,\n causal=causal))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n emb[:, 0, :] = 0\n mask = np.array([[1, 0, 1, 1]])\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n params = self._zero_mlps(params)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if causal:\n self.assertEqual(0, out[0, 0, 0])\n self.assertEqual(emb[0, 1, 0], out[0, 1, 0])\n else:\n self.assertNotEqual(0, out[0, 0, 0])\n self.assertNotEqual(emb[0, 1, 0], out[0, 1, 0])\n self.assertNotEqual(emb[0, 2, 0], out[0, 2, 0])\n self.assertNotEqual(emb[0, 3, 0], out[0, 3, 0])\n\n def test_setting_activation_function_to_zero(self):\n # An activation function that always returns zeros should result in the\n # same model output as setting all MLP weights to zero.\n\n @hk.transform\n def forward_zero(emb, mask):\n transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jnp.zeros_like))\n return transformer(emb, mask).output\n\n @hk.transform\n def forward(emb, mask):\n transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jax.nn.gelu))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n mask = np.ones((1, seq_len))\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n params_no_mlps = self._zero_mlps(params)\n\n out_zero_activation = forward_zero.apply(params, next(rng), emb, mask)\n out_no_mlps = forward.apply(params_no_mlps, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n np.testing.assert_allclose(out_zero_activation, out_no_mlps)\n self.assertFalse(np.allclose(out_zero_activation, 0))\n\n def test_not_setting_embedding_size_produces_same_output_as_default_model(\n 
self):\n config = model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False)\n\n @hk.without_apply_rng\n @hk.transform\n def forward_model(emb, mask):\n return model.Transformer(config)(emb, mask).output\n\n @hk.without_apply_rng\n @hk.transform\n def forward_superposition(emb, mask):\n return compressed_model.CompressedTransformer(config)(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n mask = np.ones((1, seq_len))\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward_model.init(next(rng), emb, mask)\n params_superposition = {\n k.replace(\"transformer\", \"compressed_transformer\"): v\n for k, v in params.items()\n }\n\n out_model = forward_model.apply(params, emb, mask)\n out_superposition = forward_superposition.apply(params_superposition, emb,\n mask)\n\n self._check_layer_naming(params_superposition)\n np.testing.assert_allclose(out_model, out_superposition)\n\n @parameterized.parameters(\n dict(embedding_size=2, unembed_at_every_layer=True),\n dict(embedding_size=2, unembed_at_every_layer=False),\n dict(embedding_size=6, unembed_at_every_layer=True),\n dict(embedding_size=6, unembed_at_every_layer=False))\n def test_embbeding_size_produces_correct_shape_of_residuals_and_layer_outputs(\n self, embedding_size, unembed_at_every_layer):\n\n @hk.transform\n def forward(emb, mask):\n transformer = compressed_model.CompressedTransformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False))\n return transformer(\n emb,\n mask,\n embedding_size=embedding_size,\n unembed_at_every_layer=unembed_at_every_layer,\n )\n\n seq_len = 4\n model_size = 16\n\n emb = np.random.random((1, seq_len, model_size))\n mask = np.ones((1, seq_len))\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n activations = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n\n for residual in activations.residuals:\n self.assertEqual(residual.shape, (1, seq_len, embedding_size))\n\n for layer_output in activations.layer_outputs:\n self.assertEqual(layer_output.shape, (1, seq_len, model_size))\n\n @parameterized.parameters(\n dict(model_size=2, unembed_at_every_layer=True),\n dict(model_size=2, unembed_at_every_layer=False),\n dict(model_size=6, unembed_at_every_layer=True),\n dict(model_size=6, unembed_at_every_layer=False))\n def test_identity_embedding_produces_same_output_as_standard_model(\n self, model_size, unembed_at_every_layer):\n\n config = model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False)\n\n @hk.without_apply_rng\n @hk.transform\n def forward_model(emb, mask):\n return model.Transformer(config)(emb, mask).output\n\n @hk.without_apply_rng\n @hk.transform\n def forward_superposition(emb, mask):", "metadata": {"task_id": "deepmind--tracr/127", "ground_truth": " return compressed_model.CompressedTransformer(config)(\n emb,\n mask,\n embedding_size=model_size,\n unembed_at_every_layer=unembed_at_every_layer).output\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "compressed_model_test.py"], "context_start_lineno": 0, "lineno": 287, "function_name": "forward_superposition"}, "groundtruth": " return compressed_model.CompressedTransformer(config)(\n emb,\n 
mask,\n embedding_size=model_size,\n unembed_at_every_layer=unembed_at_every_layer).output\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Didactic example of an autoregressive Transformer-based language model.\n\nGlossary of shapes:\n- B: Batch size.\n- T: Sequence length.\n- D: Model embedding size.\n- H: Number of attention heads.\n- V: Vocabulary size.\n\nForked from: haiku.examples.transformer.model\n\"\"\"\n\nimport collections\nimport dataclasses\nfrom typing import Callable, List, Optional\n\nimport chex\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import attention\n\n# hk.Modules are not always callable: github.com/deepmind/dm-haiku/issues/52\n# Ideally, we'd want a type:\n# CallableHaikuModule = Intersection[Callable[..., jax.Array], hk.Module]\n# But Intersection does not exist (yet): github.com/python/typing/issues/213\nCallableHaikuModule = Callable[..., jax.Array]\n\n\n@chex.dataclass\nclass TransformerOutput:\n layer_outputs: List[jax.Array] # [B, T, D]\n residuals: List[jax.Array] # [B, T, D]\n attn_logits: List[jax.Array] # [B, H, T, T]\n output: jax.Array # [B, T, D]\n input_embeddings: jax.Array # [B, T, D]\n\n\n@dataclasses.dataclass\nclass TransformerConfig:\n num_heads: int\n num_layers: int\n key_size: int\n mlp_hidden_size: int\n dropout_rate: float\n activation_function: Callable[[jax.Array], jax.Array] = jax.nn.gelu\n layer_norm: bool = True\n causal: bool = False\n\n\n@dataclasses.dataclass\nclass Transformer(hk.Module):\n \"\"\"A transformer stack.\"\"\"\n\n config: TransformerConfig\n name: Optional[str] = None\n\n def __call__(\n self,\n embeddings: jax.Array, # [B, T, D]\n mask: jax.Array, # [B, T]\n *,\n use_dropout: bool = True,\n ) -> TransformerOutput:\n \"\"\"Transforms input embedding sequences to output embedding sequences.\"\"\"\n\n def layer_norm(x: jax.Array) -> jax.Array:\n \"\"\"Applies a unique LayerNorm to x with default settings.\"\"\"", "metadata": {"task_id": "deepmind--tracr/128", "ground_truth": " if self.config.layer_norm:\n return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)\n return x\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "model.py"], "context_start_lineno": 0, "lineno": 83, "function_name": "layer_norm"}, "groundtruth": " if self.config.layer_norm:\n return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)\n return x\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Didactic example of an autoregressive Transformer-based language model.\n\nGlossary of shapes:\n- B: Batch size.\n- T: Sequence length.\n- D: Model embedding size.\n- H: Number of attention heads.\n- V: Vocabulary size.\n\nForked from: haiku.examples.transformer.model\n\"\"\"\n\nimport collections\nimport dataclasses\nfrom typing import Callable, List, Optional\n\nimport chex\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import attention\n\n# hk.Modules are not always callable: github.com/deepmind/dm-haiku/issues/52\n# Ideally, we'd want a type:\n# CallableHaikuModule = Intersection[Callable[..., jax.Array], hk.Module]\n# But Intersection does not exist (yet): github.com/python/typing/issues/213\nCallableHaikuModule = Callable[..., jax.Array]\n\n\n@chex.dataclass\nclass TransformerOutput:\n layer_outputs: List[jax.Array] # [B, T, D]\n residuals: List[jax.Array] # [B, T, D]\n attn_logits: List[jax.Array] # [B, H, T, T]\n output: jax.Array # [B, T, D]\n input_embeddings: jax.Array # [B, T, D]\n\n\n@dataclasses.dataclass\nclass TransformerConfig:\n num_heads: int\n num_layers: int\n key_size: int\n mlp_hidden_size: int\n dropout_rate: float\n activation_function: Callable[[jax.Array], jax.Array] = jax.nn.gelu\n layer_norm: bool = True\n causal: bool = False\n\n\n@dataclasses.dataclass\nclass Transformer(hk.Module):\n \"\"\"A transformer stack.\"\"\"\n\n config: TransformerConfig\n name: Optional[str] = None\n\n def __call__(\n self,\n embeddings: jax.Array, # [B, T, D]\n mask: jax.Array, # [B, T]\n *,\n use_dropout: bool = True,\n ) -> TransformerOutput:\n \"\"\"Transforms input embedding sequences to output embedding sequences.\"\"\"\n\n def layer_norm(x: jax.Array) -> jax.Array:\n \"\"\"Applies a unique LayerNorm to x with default settings.\"\"\"\n if self.config.layer_norm:\n return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)\n return x\n\n initializer = hk.initializers.VarianceScaling(2 / self.config.num_layers)\n dropout_rate = self.config.dropout_rate if use_dropout else 0.\n _, seq_len, model_size = embeddings.shape\n\n # Compute causal mask for autoregressive sequence modelling.\n mask = mask[:, None, None, :] # [B, H=1, T'=1, T]\n mask = mask.repeat(seq_len, axis=2) # [B, H=1, T, T]\n\n if self.config.causal:\n causal_mask = np.ones((1, 1, seq_len, seq_len)) # [B=1, H=1, T, T]\n causal_mask = np.tril(causal_mask)\n mask = mask * causal_mask # [B, H=1, T, T]\n\n # Set up activation collection.\n collected = collections.defaultdict(list)\n\n def collect(**kwargs):\n for k, v in kwargs.items():\n collected[k].append(v)\n\n residual = embeddings\n for layer in range(self.config.num_layers):\n with hk.experimental.name_scope(f\"layer_{layer}\"):\n # First the attention block.\n attn_block = attention.MultiHeadAttention(\n num_heads=self.config.num_heads,\n 
key_size=self.config.key_size,\n model_size=model_size,\n w_init=initializer,\n name=\"attn\")\n attn_in = layer_norm(residual)\n attn_out = attn_block(attn_in, attn_in, attn_in, mask=mask)\n attn_out, attn_logits = attn_out.out, attn_out.logits\n if dropout_rate > 0:\n attn_out = hk.dropout(hk.next_rng_key(), dropout_rate, attn_out)\n residual = residual + attn_out\n\n collect(\n residuals=residual, layer_outputs=attn_out, attn_logits=attn_logits)\n\n # Then the dense block.\n with hk.experimental.name_scope(\"mlp\"):\n dense_block = hk.Sequential([\n hk.Linear(\n self.config.mlp_hidden_size,\n w_init=initializer,\n name=\"linear_1\"),\n self.config.activation_function,\n hk.Linear(model_size, w_init=initializer, name=\"linear_2\"),\n ])\n dense_in = layer_norm(residual)\n dense_out = dense_block(dense_in)\n if dropout_rate > 0:\n dense_out = hk.dropout(hk.next_rng_key(), dropout_rate, dense_out)\n residual = residual + dense_out\n\n collect(residuals=residual, layer_outputs=dense_out)\n\n return TransformerOutput(\n residuals=collected[\"residuals\"],\n layer_outputs=collected[\"layer_outputs\"],\n attn_logits=collected[\"attn_logits\"],\n output=layer_norm(residual),\n input_embeddings=embeddings,\n )\n\n\n@chex.dataclass\nclass CompiledTransformerModelOutput:\n transformer_output: TransformerOutput\n unembedded_output: jax.Array # [B, T]\n\n\n@dataclasses.dataclass\nclass CompiledTransformerModel(hk.Module):\n \"\"\"A transformer model with one-hot embeddings.\"\"\"\n transformer: Transformer\n token_embed: CallableHaikuModule\n position_embed: CallableHaikuModule\n unembed: CallableHaikuModule\n use_unembed_argmax: bool\n pad_token: Optional[int] = None\n\n def embed(self, tokens: jax.Array) -> jax.Array:", "metadata": {"task_id": "deepmind--tracr/129", "ground_truth": " token_embeddings = self.token_embed(tokens)\n positional_embeddings = self.position_embed(jnp.indices(tokens.shape)[-1])\n return token_embeddings + positional_embeddings # [B, T, D]\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "model.py"], "context_start_lineno": 0, "lineno": 171, "function_name": "embed"}, "groundtruth": " token_embeddings = self.token_embed(tokens)\n positional_embeddings = self.position_embed(jnp.indices(tokens.shape)[-1])\n return token_embeddings + positional_embeddings # [B, T, D]\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Didactic example of an autoregressive Transformer-based language model.\n\nGlossary of shapes:\n- B: Batch size.\n- T: Sequence length.\n- D: Model embedding size.\n- H: Number of attention heads.\n- V: Vocabulary size.\n\nForked from: haiku.examples.transformer.model\n\"\"\"\n\nimport collections\nimport dataclasses\nfrom typing import Callable, List, Optional\n\nimport chex\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import attention\n\n# hk.Modules are not always callable: github.com/deepmind/dm-haiku/issues/52\n# Ideally, we'd want a type:\n# CallableHaikuModule = Intersection[Callable[..., jax.Array], hk.Module]\n# But Intersection does not exist (yet): github.com/python/typing/issues/213\nCallableHaikuModule = Callable[..., jax.Array]\n\n\n@chex.dataclass\nclass TransformerOutput:\n layer_outputs: List[jax.Array] # [B, T, D]\n residuals: List[jax.Array] # [B, T, D]\n attn_logits: List[jax.Array] # [B, H, T, T]\n output: jax.Array # [B, T, D]\n input_embeddings: jax.Array # [B, T, D]\n\n\n@dataclasses.dataclass\nclass TransformerConfig:\n num_heads: int\n num_layers: int\n key_size: int\n mlp_hidden_size: int\n dropout_rate: float\n activation_function: Callable[[jax.Array], jax.Array] = jax.nn.gelu\n layer_norm: bool = True\n causal: bool = False\n\n\n@dataclasses.dataclass\nclass Transformer(hk.Module):\n \"\"\"A transformer stack.\"\"\"\n\n config: TransformerConfig\n name: Optional[str] = None\n\n def __call__(\n self,\n embeddings: jax.Array, # [B, T, D]\n mask: jax.Array, # [B, T]\n *,\n use_dropout: bool = True,\n ) -> TransformerOutput:\n \"\"\"Transforms input embedding sequences to output embedding sequences.\"\"\"\n\n def layer_norm(x: jax.Array) -> jax.Array:\n \"\"\"Applies a unique LayerNorm to x with default settings.\"\"\"\n if self.config.layer_norm:\n return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)\n return x\n\n initializer = hk.initializers.VarianceScaling(2 / self.config.num_layers)\n dropout_rate = self.config.dropout_rate if use_dropout else 0.\n _, seq_len, model_size = embeddings.shape\n\n # Compute causal mask for autoregressive sequence modelling.\n mask = mask[:, None, None, :] # [B, H=1, T'=1, T]\n mask = mask.repeat(seq_len, axis=2) # [B, H=1, T, T]\n\n if self.config.causal:\n causal_mask = np.ones((1, 1, seq_len, seq_len)) # [B=1, H=1, T, T]\n causal_mask = np.tril(causal_mask)\n mask = mask * causal_mask # [B, H=1, T, T]\n\n # Set up activation collection.\n collected = collections.defaultdict(list)\n\n def collect(**kwargs):\n for k, v in kwargs.items():\n collected[k].append(v)\n\n residual = embeddings\n for layer in range(self.config.num_layers):\n with hk.experimental.name_scope(f\"layer_{layer}\"):\n # First the attention block.\n attn_block = attention.MultiHeadAttention(\n num_heads=self.config.num_heads,\n 
key_size=self.config.key_size,\n model_size=model_size,\n w_init=initializer,\n name=\"attn\")\n attn_in = layer_norm(residual)\n attn_out = attn_block(attn_in, attn_in, attn_in, mask=mask)\n attn_out, attn_logits = attn_out.out, attn_out.logits\n if dropout_rate > 0:\n attn_out = hk.dropout(hk.next_rng_key(), dropout_rate, attn_out)\n residual = residual + attn_out\n\n collect(\n residuals=residual, layer_outputs=attn_out, attn_logits=attn_logits)\n\n # Then the dense block.\n with hk.experimental.name_scope(\"mlp\"):\n dense_block = hk.Sequential([\n hk.Linear(\n self.config.mlp_hidden_size,\n w_init=initializer,\n name=\"linear_1\"),\n self.config.activation_function,\n hk.Linear(model_size, w_init=initializer, name=\"linear_2\"),\n ])\n dense_in = layer_norm(residual)\n dense_out = dense_block(dense_in)\n if dropout_rate > 0:\n dense_out = hk.dropout(hk.next_rng_key(), dropout_rate, dense_out)\n residual = residual + dense_out\n\n collect(residuals=residual, layer_outputs=dense_out)\n\n return TransformerOutput(\n residuals=collected[\"residuals\"],\n layer_outputs=collected[\"layer_outputs\"],\n attn_logits=collected[\"attn_logits\"],\n output=layer_norm(residual),\n input_embeddings=embeddings,\n )\n\n\n@chex.dataclass\nclass CompiledTransformerModelOutput:\n transformer_output: TransformerOutput\n unembedded_output: jax.Array # [B, T]\n\n\n@dataclasses.dataclass\nclass CompiledTransformerModel(hk.Module):\n \"\"\"A transformer model with one-hot embeddings.\"\"\"\n transformer: Transformer\n token_embed: CallableHaikuModule\n position_embed: CallableHaikuModule\n unembed: CallableHaikuModule\n use_unembed_argmax: bool\n pad_token: Optional[int] = None\n\n def embed(self, tokens: jax.Array) -> jax.Array:\n token_embeddings = self.token_embed(tokens)\n positional_embeddings = self.position_embed(jnp.indices(tokens.shape)[-1])\n return token_embeddings + positional_embeddings # [B, T, D]\n\n def __call__(\n self,\n tokens: jax.Array,\n use_dropout: bool = True,\n ) -> CompiledTransformerModelOutput:\n \"\"\"Embed tokens, pass through model, and unembed output.\"\"\"", "metadata": {"task_id": "deepmind--tracr/130", "ground_truth": " if self.pad_token is None:\n input_mask = jnp.ones_like(tokens)\n else:\n input_mask = (tokens != self.pad_token)\n input_embeddings = self.embed(tokens)\n\n transformer_output = self.transformer(\n input_embeddings,\n input_mask,\n use_dropout=use_dropout,\n )\n return CompiledTransformerModelOutput(\n transformer_output=transformer_output,\n unembedded_output=self.unembed(\n transformer_output.output,\n use_unembed_argmax=self.use_unembed_argmax,\n ),\n )\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "model.py"], "context_start_lineno": 0, "lineno": 181, "function_name": "__call__"}, "groundtruth": " if self.pad_token is None:\n input_mask = jnp.ones_like(tokens)\n else:\n input_mask = (tokens != self.pad_token)\n input_embeddings = self.embed(tokens)\n\n transformer_output = self.transformer(\n input_embeddings,\n input_mask,\n use_dropout=use_dropout,\n )\n return CompiledTransformerModelOutput(\n transformer_output=transformer_output,\n unembedded_output=self.unembed(\n transformer_output.output,\n use_unembed_argmax=self.use_unembed_argmax,\n ),\n )\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Modified transformer to learn a linear compression of the residual stream.\n\nCompressedTransformer adds three arguments compared to Transformer:\n- embedding_size: the size of the compressed residual stream.\n- unembed_at_every_layer: whether to apply the unembedding before applying\n attention and MLP layers\n- return_activations: whether to return all model activations rather than just\n the outputs\n\"\"\"\n\nimport collections\nimport dataclasses\nfrom typing import Optional\n\nimport haiku as hk\nimport jax\nimport numpy as np\n\nfrom tracr.transformer import attention\nfrom tracr.transformer import model\n\n\n@dataclasses.dataclass\nclass CompressedTransformer(hk.Module):\n \"\"\"A transformer stack with linearly compressed residual stream.\"\"\"\n\n config: model.TransformerConfig\n name: Optional[str] = None\n\n def __call__(\n self,\n embeddings: jax.Array, # [B, T, D]\n mask: jax.Array, # [B, T]\n *,\n use_dropout: bool = True,\n embedding_size: Optional[int] = None,\n unembed_at_every_layer: bool = False,\n ) -> model.TransformerOutput: # [B, T, D]\n \"\"\"Transforms input embedding sequences to output embedding sequences.\n\n Args:\n embeddings: Input embeddings to pass through the model.\n mask: Boolean mask to restrict the inputs the model uses.\n use_dropout: Turns dropout on/off.\n embedding_size: Dimension to compress the residual stream to.\n unembed_at_every_layer: Whether to unembed the residual stream when\n reading the input for every layer (keeping the layer input sizes) or to\n only unembed before the model output (compressing the layer inputs).\n\n Returns:\n The outputs of the forward pass through the transformer.\n \"\"\"\n\n def layer_norm(x: jax.Array) -> jax.Array:\n \"\"\"Applies a unique LayerNorm to x with default settings.\"\"\"", "metadata": {"task_id": "deepmind--tracr/131", "ground_truth": " if self.config.layer_norm:\n return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)\n return x\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "compressed_model.py"], "context_start_lineno": 0, "lineno": 69, "function_name": "layer_norm"}, "groundtruth": " if self.config.layer_norm:\n return hk.LayerNorm(axis=-1, create_scale=True, create_offset=True)(x)\n return x\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Basic encoder for inputs with a fixed vocabulary.\"\"\"\n\nimport abc\nfrom typing import Any, List, Optional, Sequence\n\nfrom tracr.craft import bases\n\n\nclass Encoder(abc.ABC):\n \"\"\"Encodes a list of tokens into a list of inputs for a transformer model.\n\n The abstract class does not make assumptions on the input and output types,\n and we have different encoders for different input types.\n \"\"\"\n\n @abc.abstractmethod\n def encode(self, inputs: List[Any]) -> List[Any]:\n return list()\n\n @abc.abstractmethod\n def decode(self, encodings: List[Any]) -> List[Any]:\n return list()\n\n @property\n def pad_token(self) -> Optional[str]:\n return None\n\n @property\n def bos_token(self) -> Optional[str]:\n return None\n\n @property\n def pad_encoding(self) -> Optional[int]:\n return None\n\n @property\n def bos_encoding(self) -> Optional[int]:\n return None\n\n\nclass NumericalEncoder(Encoder):\n \"\"\"Encodes numerical variables (simply using the identity mapping).\"\"\"\n\n def encode(self, inputs: List[float]) -> List[float]:\n return inputs\n\n def decode(self, encodings: List[float]) -> List[float]:\n return encodings\n\n\nclass CategoricalEncoder(Encoder):\n \"\"\"Encodes categorical variables with a fixed vocabulary.\"\"\"\n\n def __init__(\n self,\n basis: Sequence[bases.BasisDirection],\n enforce_bos: bool = False,\n bos_token: Optional[str] = None,\n pad_token: Optional[str] = None,\n max_seq_len: Optional[int] = None,\n ):\n \"\"\"Initialises. 
If enforce_bos is set, ensures inputs start with it.\"\"\"", "metadata": {"task_id": "deepmind--tracr/132", "ground_truth": " if enforce_bos and not bos_token:\n raise ValueError(\"BOS token must be specified if enforcing BOS.\")\n\n self.encoding_map = {}\n for i, direction in enumerate(basis):\n val = direction.value\n self.encoding_map[val] = i\n\n if bos_token and bos_token not in self.encoding_map:\n raise ValueError(\"BOS token missing in encoding.\")\n\n if pad_token and pad_token not in self.encoding_map:\n raise ValueError(\"PAD token missing in encoding.\")\n\n self.enforce_bos = enforce_bos\n self._bos_token = bos_token\n self._pad_token = pad_token\n self._max_seq_len = max_seq_len\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "encoder.py"], "context_start_lineno": 0, "lineno": 76, "function_name": "__init__"}, "groundtruth": " if enforce_bos and not bos_token:\n raise ValueError(\"BOS token must be specified if enforcing BOS.\")\n\n self.encoding_map = {}\n for i, direction in enumerate(basis):\n val = direction.value\n self.encoding_map[val] = i\n\n if bos_token and bos_token not in self.encoding_map:\n raise ValueError(\"BOS token missing in encoding.\")\n\n if pad_token and pad_token not in self.encoding_map:\n raise ValueError(\"PAD token missing in encoding.\")\n\n self.enforce_bos = enforce_bos\n self._bos_token = bos_token\n self._pad_token = pad_token\n self._max_seq_len = max_seq_len\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Basic encoder for inputs with a fixed vocabulary.\"\"\"\n\nimport abc\nfrom typing import Any, List, Optional, Sequence\n\nfrom tracr.craft import bases\n\n\nclass Encoder(abc.ABC):\n \"\"\"Encodes a list of tokens into a list of inputs for a transformer model.\n\n The abstract class does not make assumptions on the input and output types,\n and we have different encoders for different input types.\n \"\"\"\n\n @abc.abstractmethod\n def encode(self, inputs: List[Any]) -> List[Any]:\n return list()\n\n @abc.abstractmethod\n def decode(self, encodings: List[Any]) -> List[Any]:\n return list()\n\n @property\n def pad_token(self) -> Optional[str]:\n return None\n\n @property\n def bos_token(self) -> Optional[str]:\n return None\n\n @property\n def pad_encoding(self) -> Optional[int]:\n return None\n\n @property\n def bos_encoding(self) -> Optional[int]:\n return None\n\n\nclass NumericalEncoder(Encoder):\n \"\"\"Encodes numerical variables (simply using the identity mapping).\"\"\"\n\n def encode(self, inputs: List[float]) -> List[float]:\n return inputs\n\n def decode(self, encodings: List[float]) -> List[float]:\n return encodings\n\n\nclass CategoricalEncoder(Encoder):\n \"\"\"Encodes categorical variables with a fixed vocabulary.\"\"\"\n\n def __init__(\n self,\n basis: Sequence[bases.BasisDirection],\n enforce_bos: bool = False,\n bos_token: 
Optional[str] = None,\n pad_token: Optional[str] = None,\n max_seq_len: Optional[int] = None,\n ):\n \"\"\"Initialises. If enforce_bos is set, ensures inputs start with it.\"\"\"\n if enforce_bos and not bos_token:\n raise ValueError(\"BOS token must be specified if enforcing BOS.\")\n\n self.encoding_map = {}\n for i, direction in enumerate(basis):\n val = direction.value\n self.encoding_map[val] = i\n\n if bos_token and bos_token not in self.encoding_map:\n raise ValueError(\"BOS token missing in encoding.\")\n\n if pad_token and pad_token not in self.encoding_map:\n raise ValueError(\"PAD token missing in encoding.\")\n\n self.enforce_bos = enforce_bos\n self._bos_token = bos_token\n self._pad_token = pad_token\n self._max_seq_len = max_seq_len\n\n def encode(self, inputs: List[bases.Value]) -> List[int]:", "metadata": {"task_id": "deepmind--tracr/133", "ground_truth": " if self.enforce_bos and inputs[0] != self.bos_token:\n raise ValueError(\"First input token must be BOS token. \"\n f\"Should be '{self.bos_token}', but was '{inputs[0]}'.\")\n if missing := set(inputs) - set(self.encoding_map.keys()):\n raise ValueError(f\"Inputs {missing} not found in encoding \",\n self.encoding_map.keys())\n if self._max_seq_len is not None and len(inputs) > self._max_seq_len:\n raise ValueError(f\"inputs={inputs} are longer than the maximum \"\n f\"sequence length {self._max_seq_len}\")\n\n return [self.encoding_map[x] for x in inputs]\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "encoder.py"], "context_start_lineno": 0, "lineno": 96, "function_name": "encode"}, "groundtruth": " if self.enforce_bos and inputs[0] != self.bos_token:\n raise ValueError(\"First input token must be BOS token. \"\n f\"Should be '{self.bos_token}', but was '{inputs[0]}'.\")\n if missing := set(inputs) - set(self.encoding_map.keys()):\n raise ValueError(f\"Inputs {missing} not found in encoding \",\n self.encoding_map.keys())\n if self._max_seq_len is not None and len(inputs) > self._max_seq_len:\n raise ValueError(f\"inputs={inputs} are longer than the maximum \"\n f\"sequence length {self._max_seq_len}\")\n\n return [self.encoding_map[x] for x in inputs]\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Basic encoder for inputs with a fixed vocabulary.\"\"\"\n\nimport abc\nfrom typing import Any, List, Optional, Sequence\n\nfrom tracr.craft import bases\n\n\nclass Encoder(abc.ABC):\n \"\"\"Encodes a list of tokens into a list of inputs for a transformer model.\n\n The abstract class does not make assumptions on the input and output types,\n and we have different encoders for different input types.\n \"\"\"\n\n @abc.abstractmethod\n def encode(self, inputs: List[Any]) -> List[Any]:\n return list()\n\n @abc.abstractmethod\n def decode(self, encodings: List[Any]) -> List[Any]:\n return list()\n\n @property\n def pad_token(self) -> Optional[str]:\n return None\n\n @property\n def bos_token(self) -> Optional[str]:\n return None\n\n @property\n def pad_encoding(self) -> Optional[int]:\n return None\n\n @property\n def bos_encoding(self) -> Optional[int]:\n return None\n\n\nclass NumericalEncoder(Encoder):\n \"\"\"Encodes numerical variables (simply using the identity mapping).\"\"\"\n\n def encode(self, inputs: List[float]) -> List[float]:\n return inputs\n\n def decode(self, encodings: List[float]) -> List[float]:\n return encodings\n\n\nclass CategoricalEncoder(Encoder):\n \"\"\"Encodes categorical variables with a fixed vocabulary.\"\"\"\n\n def __init__(\n self,\n basis: Sequence[bases.BasisDirection],\n enforce_bos: bool = False,\n bos_token: Optional[str] = None,\n pad_token: Optional[str] = None,\n max_seq_len: Optional[int] = None,\n ):\n \"\"\"Initialises. If enforce_bos is set, ensures inputs start with it.\"\"\"\n if enforce_bos and not bos_token:\n raise ValueError(\"BOS token must be specified if enforcing BOS.\")\n\n self.encoding_map = {}\n for i, direction in enumerate(basis):\n val = direction.value\n self.encoding_map[val] = i\n\n if bos_token and bos_token not in self.encoding_map:\n raise ValueError(\"BOS token missing in encoding.\")\n\n if pad_token and pad_token not in self.encoding_map:\n raise ValueError(\"PAD token missing in encoding.\")\n\n self.enforce_bos = enforce_bos\n self._bos_token = bos_token\n self._pad_token = pad_token\n self._max_seq_len = max_seq_len\n\n def encode(self, inputs: List[bases.Value]) -> List[int]:\n if self.enforce_bos and inputs[0] != self.bos_token:\n raise ValueError(\"First input token must be BOS token. \"\n f\"Should be '{self.bos_token}', but was '{inputs[0]}'.\")\n if missing := set(inputs) - set(self.encoding_map.keys()):\n raise ValueError(f\"Inputs {missing} not found in encoding \",\n self.encoding_map.keys())\n if self._max_seq_len is not None and len(inputs) > self._max_seq_len:\n raise ValueError(f\"inputs={inputs} are longer than the maximum \"\n f\"sequence length {self._max_seq_len}\")\n\n return [self.encoding_map[x] for x in inputs]\n\n def decode(self, encodings: List[int]) -> List[bases.Value]:\n \"\"\"Recover the tokens that corresponds to `ids`. 
Inverse of __call__.\"\"\"", "metadata": {"task_id": "deepmind--tracr/134", "ground_truth": " decoding_map = {val: key for key, val in self.encoding_map.items()}\n if missing := set(encodings) - set(decoding_map.keys()):\n raise ValueError(f\"Inputs {missing} not found in decoding map \",\n decoding_map.keys())\n return [decoding_map[x] for x in encodings]\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "encoder.py"], "context_start_lineno": 0, "lineno": 110, "function_name": "decode"}, "groundtruth": " decoding_map = {val: key for key, val in self.encoding_map.items()}\n if missing := set(encodings) - set(decoding_map.keys()):\n raise ValueError(f\"Inputs {missing} not found in decoding map \",\n decoding_map.keys())\n return [decoding_map[x] for x in encodings]\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import model\n\n\nclass TransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"transformer/layer_{i}/layer_norm\"", "metadata": {"task_id": "deepmind--tracr/135", "ground_truth": " for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"transformer\")\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "model_test.py"], "context_start_lineno": 0, "lineno": 32, "function_name": "_check_layer_naming"}, "groundtruth": " for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"transformer\")\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import model\n\n\nclass TransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"transformer/layer_{i}/layer_norm\"\n for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"transformer\")\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n\n def _zero_mlps(self, params):", "metadata": {"task_id": "deepmind--tracr/136", "ground_truth": " for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "model_test.py"], "context_start_lineno": 0, "lineno": 46, "function_name": "_zero_mlps"}, "groundtruth": " for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import model\n\n\nclass TransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"transformer/layer_{i}/layer_norm\"\n for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"transformer\")\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n\n def _zero_mlps(self, params):\n for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n\n @parameterized.parameters(dict(layer_norm=True), dict(layer_norm=False))\n def test_layer_norm(self, layer_norm):\n # input = [1, 1, 1, 1]\n # If layer norm is used, this should give all-0 output for a freshly\n # initialized model because LN will subtract the mean after each layer.\n # Else we expect non-zero outputs.\n\n @hk.transform\n def forward(emb, mask):", "metadata": {"task_id": "deepmind--tracr/137", "ground_truth": " transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=layer_norm))\n return transformer(emb, mask).output\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "model_test.py"], "context_start_lineno": 0, "lineno": 61, "function_name": "forward"}, "groundtruth": " transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=layer_norm))\n return transformer(emb, mask).output\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import model\n\n\nclass TransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"transformer/layer_{i}/layer_norm\"\n for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"transformer\")\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n\n def _zero_mlps(self, params):\n for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n\n @parameterized.parameters(dict(layer_norm=True), dict(layer_norm=False))\n def test_layer_norm(self, layer_norm):\n # input = [1, 1, 1, 1]\n # If layer norm is used, this should give all-0 output for a freshly\n # initialized model because LN will subtract the mean after each layer.\n # Else we expect non-zero outputs.\n\n @hk.transform\n def forward(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=layer_norm))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = jnp.ones((1, seq_len, 1))\n mask = jnp.ones((1, seq_len))\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if layer_norm:\n np.testing.assert_allclose(out, 0)\n else:\n self.assertFalse(np.allclose(out, 0))\n\n @parameterized.parameters(dict(causal=True), dict(causal=False))\n def test_causal_attention(self, causal):\n # input = [0, random, random, random]\n # mask = [1, 0, 1, 1]\n # For causal attention the second token can only attend to the first one, so\n # it should be the same. 
For non-causal attention all tokens should change.\n\n @hk.transform\n def forward(emb, mask):", "metadata": {"task_id": "deepmind--tracr/138", "ground_truth": " transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=False,\n causal=causal))\n return transformer(emb, mask).output\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "model_test.py"], "context_start_lineno": 0, "lineno": 93, "function_name": "forward"}, "groundtruth": " transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=False,\n causal=causal))\n return transformer(emb, mask).output\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import model\n\n\nclass TransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"transformer/layer_{i}/layer_norm\"\n for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"transformer\")\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n\n def _zero_mlps(self, params):\n for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n\n @parameterized.parameters(dict(layer_norm=True), dict(layer_norm=False))\n def test_layer_norm(self, layer_norm):\n # input = [1, 1, 1, 1]\n # If layer norm is used, this should give all-0 output for a freshly\n # initialized model because LN will subtract the mean after each layer.\n # Else we expect non-zero outputs.\n\n @hk.transform\n def forward(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=layer_norm))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = jnp.ones((1, seq_len, 1))\n mask = jnp.ones((1, seq_len))\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if layer_norm:\n 
np.testing.assert_allclose(out, 0)\n else:\n self.assertFalse(np.allclose(out, 0))\n\n @parameterized.parameters(dict(causal=True), dict(causal=False))\n def test_causal_attention(self, causal):\n # input = [0, random, random, random]\n # mask = [1, 0, 1, 1]\n # For causal attention the second token can only attend to the first one, so\n # it should be the same. For non-causal attention all tokens should change.\n\n @hk.transform\n def forward(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=False,\n causal=causal))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n emb[:, 0, :] = 0\n mask = np.array([[1, 0, 1, 1]])\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n params = self._zero_mlps(params)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if causal:\n self.assertEqual(0, out[0, 0, 0])\n self.assertEqual(emb[0, 1, 0], out[0, 1, 0])\n else:\n self.assertNotEqual(0, out[0, 0, 0])\n self.assertNotEqual(emb[0, 1, 0], out[0, 1, 0])\n self.assertNotEqual(emb[0, 2, 0], out[0, 2, 0])\n self.assertNotEqual(emb[0, 3, 0], out[0, 3, 0])\n\n def test_setting_activation_function_to_zero(self):\n # An activation function that always returns zeros should result in the\n # same model output as setting all MLP weights to zero.\n\n @hk.transform\n def forward_zero(emb, mask):", "metadata": {"task_id": "deepmind--tracr/139", "ground_truth": " transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jnp.zeros_like))\n return transformer(emb, mask).output\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "model_test.py"], "context_start_lineno": 0, "lineno": 131, "function_name": "forward_zero"}, "groundtruth": " transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jnp.zeros_like))\n return transformer(emb, mask).output\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import model\n\n\nclass TransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"transformer/layer_{i}/layer_norm\"\n for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"transformer\")\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n\n def _zero_mlps(self, params):\n for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n\n @parameterized.parameters(dict(layer_norm=True), dict(layer_norm=False))\n def test_layer_norm(self, layer_norm):\n # input = [1, 1, 1, 1]\n # If layer norm is used, this should give all-0 output for a freshly\n # initialized model because LN will subtract the mean after each layer.\n # Else we expect non-zero outputs.\n\n @hk.transform\n def forward(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=layer_norm))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = jnp.ones((1, seq_len, 1))\n mask = jnp.ones((1, seq_len))\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if layer_norm:\n np.testing.assert_allclose(out, 0)\n else:\n self.assertFalse(np.allclose(out, 0))\n\n @parameterized.parameters(dict(causal=True), dict(causal=False))\n def test_causal_attention(self, causal):\n # input = [0, random, random, random]\n # mask = [1, 0, 1, 1]\n # For causal attention the second token can only attend to the first one, so\n # it should be the same. 
For non-causal attention all tokens should change.\n\n @hk.transform\n def forward(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=False,\n causal=causal))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n emb[:, 0, :] = 0\n mask = np.array([[1, 0, 1, 1]])\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n params = self._zero_mlps(params)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if causal:\n self.assertEqual(0, out[0, 0, 0])\n self.assertEqual(emb[0, 1, 0], out[0, 1, 0])\n else:\n self.assertNotEqual(0, out[0, 0, 0])\n self.assertNotEqual(emb[0, 1, 0], out[0, 1, 0])\n self.assertNotEqual(emb[0, 2, 0], out[0, 2, 0])\n self.assertNotEqual(emb[0, 3, 0], out[0, 3, 0])\n\n def test_setting_activation_function_to_zero(self):\n # An activation function that always returns zeros should result in the\n # same model output as setting all MLP weights to zero.\n\n @hk.transform\n def forward_zero(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jnp.zeros_like))\n return transformer(emb, mask).output\n\n @hk.transform\n def forward(emb, mask):", "metadata": {"task_id": "deepmind--tracr/140", "ground_truth": " transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jax.nn.gelu))\n return transformer(emb, mask).output\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "model_test.py"], "context_start_lineno": 0, "lineno": 145, "function_name": "forward"}, "groundtruth": " transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jax.nn.gelu))\n return transformer(emb, mask).output\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import model\n\n\nclass TransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"transformer/layer_{i}/layer_norm\"\n for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"transformer\")\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n\n def _zero_mlps(self, params):\n for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n\n @parameterized.parameters(dict(layer_norm=True), dict(layer_norm=False))\n def test_layer_norm(self, layer_norm):\n # input = [1, 1, 1, 1]\n # If layer norm is used, this should give all-0 output for a freshly\n # initialized model because LN will subtract the mean after each layer.\n # Else we expect non-zero outputs.\n\n @hk.transform\n def forward(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=layer_norm))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = jnp.ones((1, seq_len, 1))\n mask = jnp.ones((1, seq_len))\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if layer_norm:\n np.testing.assert_allclose(out, 0)\n else:\n self.assertFalse(np.allclose(out, 0))\n\n @parameterized.parameters(dict(causal=True), dict(causal=False))\n def test_causal_attention(self, causal):\n # input = [0, random, random, random]\n # mask = [1, 0, 1, 1]\n # For causal attention the second token can only attend to the first one, so\n # it should be the same. 
For non-causal attention all tokens should change.\n\n @hk.transform\n def forward(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=False,\n causal=causal))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n emb[:, 0, :] = 0\n mask = np.array([[1, 0, 1, 1]])\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n params = self._zero_mlps(params)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if causal:\n self.assertEqual(0, out[0, 0, 0])\n self.assertEqual(emb[0, 1, 0], out[0, 1, 0])\n else:\n self.assertNotEqual(0, out[0, 0, 0])\n self.assertNotEqual(emb[0, 1, 0], out[0, 1, 0])\n self.assertNotEqual(emb[0, 2, 0], out[0, 2, 0])\n self.assertNotEqual(emb[0, 3, 0], out[0, 3, 0])\n\n def test_setting_activation_function_to_zero(self):\n # An activation function that always returns zeros should result in the\n # same model output as setting all MLP weights to zero.\n\n @hk.transform\n def forward_zero(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jnp.zeros_like))\n return transformer(emb, mask).output\n\n @hk.transform\n def forward(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jax.nn.gelu))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n mask = np.ones((1, seq_len))\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n params_no_mlps = self._zero_mlps(params)\n\n out_zero_activation = forward_zero.apply(params, next(rng), emb, mask)\n out_no_mlps = forward.apply(params_no_mlps, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n np.testing.assert_allclose(out_zero_activation, out_no_mlps)\n self.assertFalse(np.allclose(out_zero_activation, 0))\n\n\nclass CompiledTransformerModelTest(parameterized.TestCase):\n\n def _get_one_hot_embed_unembed(self, vocab_size, max_seq_len):\n # Embeds tokens as one-hot into the first `vocab_size` dimensions", "metadata": {"task_id": "deepmind--tracr/141", "ground_truth": " token_embed = hk.Embed(\n embedding_matrix=jnp.block(\n [jnp.eye(vocab_size),\n jnp.zeros((vocab_size, max_seq_len))]))\n\n # Embeds positions as one-hot into the last `max_seq_len` dimensions\n position_embed = hk.Embed(\n embedding_matrix=jnp.block(\n [jnp.zeros((max_seq_len, vocab_size)),\n jnp.eye(max_seq_len)]))\n\n class Unembed(hk.Module):\n\n def __call__(self, embeddings):\n return jnp.argmax(embeddings[:, :, :vocab_size], axis=-1)\n\n return token_embed, position_embed, Unembed()\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "model_test.py"], "context_start_lineno": 0, "lineno": 178, "function_name": "_get_one_hot_embed_unembed"}, "groundtruth": " token_embed = hk.Embed(\n embedding_matrix=jnp.block(\n [jnp.eye(vocab_size),\n jnp.zeros((vocab_size, max_seq_len))]))\n\n # Embeds positions as one-hot into the last `max_seq_len` dimensions\n position_embed = hk.Embed(\n embedding_matrix=jnp.block(\n [jnp.zeros((max_seq_len, 
vocab_size)),\n jnp.eye(max_seq_len)]))\n\n class Unembed(hk.Module):\n\n def __call__(self, embeddings):\n return jnp.argmax(embeddings[:, :, :vocab_size], axis=-1)\n\n return token_embed, position_embed, Unembed()\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import model\n\n\nclass TransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"transformer/layer_{i}/layer_norm\"\n for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"transformer\")\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n\n def _zero_mlps(self, params):\n for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n\n @parameterized.parameters(dict(layer_norm=True), dict(layer_norm=False))\n def test_layer_norm(self, layer_norm):\n # input = [1, 1, 1, 1]\n # If layer norm is used, this should give all-0 output for a freshly\n # initialized model because LN will subtract the mean after each layer.\n # Else we expect non-zero outputs.\n\n @hk.transform\n def forward(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=layer_norm))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = jnp.ones((1, seq_len, 1))\n mask = jnp.ones((1, seq_len))\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if layer_norm:\n np.testing.assert_allclose(out, 0)\n else:\n self.assertFalse(np.allclose(out, 0))\n\n @parameterized.parameters(dict(causal=True), dict(causal=False))\n def test_causal_attention(self, causal):\n # input = [0, random, random, random]\n # mask = [1, 0, 1, 1]\n # For causal attention the second token can only attend to the first one, so\n # it should be the same. 
For non-causal attention all tokens should change.\n\n @hk.transform\n def forward(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=False,\n causal=causal))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n emb[:, 0, :] = 0\n mask = np.array([[1, 0, 1, 1]])\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n params = self._zero_mlps(params)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if causal:\n self.assertEqual(0, out[0, 0, 0])\n self.assertEqual(emb[0, 1, 0], out[0, 1, 0])\n else:\n self.assertNotEqual(0, out[0, 0, 0])\n self.assertNotEqual(emb[0, 1, 0], out[0, 1, 0])\n self.assertNotEqual(emb[0, 2, 0], out[0, 2, 0])\n self.assertNotEqual(emb[0, 3, 0], out[0, 3, 0])\n\n def test_setting_activation_function_to_zero(self):\n # An activation function that always returns zeros should result in the\n # same model output as setting all MLP weights to zero.\n\n @hk.transform\n def forward_zero(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jnp.zeros_like))\n return transformer(emb, mask).output\n\n @hk.transform\n def forward(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jax.nn.gelu))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n mask = np.ones((1, seq_len))\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n params_no_mlps = self._zero_mlps(params)\n\n out_zero_activation = forward_zero.apply(params, next(rng), emb, mask)\n out_no_mlps = forward.apply(params_no_mlps, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n np.testing.assert_allclose(out_zero_activation, out_no_mlps)\n self.assertFalse(np.allclose(out_zero_activation, 0))\n\n\nclass CompiledTransformerModelTest(parameterized.TestCase):\n\n def _get_one_hot_embed_unembed(self, vocab_size, max_seq_len):\n # Embeds tokens as one-hot into the first `vocab_size` dimensions\n token_embed = hk.Embed(\n embedding_matrix=jnp.block(\n [jnp.eye(vocab_size),\n jnp.zeros((vocab_size, max_seq_len))]))\n\n # Embeds positions as one-hot into the last `max_seq_len` dimensions\n position_embed = hk.Embed(\n embedding_matrix=jnp.block(\n [jnp.zeros((max_seq_len, vocab_size)),\n jnp.eye(max_seq_len)]))\n\n class Unembed(hk.Module):\n\n def __call__(self, embeddings):\n return jnp.argmax(embeddings[:, :, :vocab_size], axis=-1)\n\n return token_embed, position_embed, Unembed()\n\n def test_embedding_gives_desired_result(self):\n tokens = jnp.array([[1, 2, 3]])\n vocab_size, max_seq_len, pad_token = 5, 5, 0\n\n expected_embeddings = jnp.array([[[0, 1, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 1, 0, 0]]])\n\n @hk.transform\n def embed(tokens):", "metadata": {"task_id": "deepmind--tracr/142", "ground_truth": " transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n 
causal=False,\n layer_norm=False,\n activation_function=jax.nn.gelu))\n token_embed, position_embed, unembed = self._get_one_hot_embed_unembed(\n vocab_size, max_seq_len)\n compiled_model = model.CompiledTransformerModel(\n transformer=transformer,\n token_embed=token_embed,\n position_embed=position_embed,\n unembed=unembed,\n use_unembed_argmax=True,\n pad_token=pad_token)\n return compiled_model.embed(tokens)\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "model_test.py"], "context_start_lineno": 0, "lineno": 206, "function_name": "embed"}, "groundtruth": " transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jax.nn.gelu))\n token_embed, position_embed, unembed = self._get_one_hot_embed_unembed(\n vocab_size, max_seq_len)\n compiled_model = model.CompiledTransformerModel(\n transformer=transformer,\n token_embed=token_embed,\n position_embed=position_embed,\n unembed=unembed,\n use_unembed_argmax=True,\n pad_token=pad_token)\n return compiled_model.embed(tokens)\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for transformer.model.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nfrom tracr.transformer import model\n\n\nclass TransformerTest(parameterized.TestCase):\n\n def _check_layer_naming(self, params):\n # Modules should be named for example\n # For MLPs: \"transformer/layer_{i}/mlp/linear_1\"\n # For Attention: \"transformer/layer_{i}/attn/key\"\n # For Layer Norm: \"transformer/layer_{i}/layer_norm\"\n for key in params.keys():\n levels = key.split(\"/\")\n self.assertEqual(levels[0], \"transformer\")\n if levels[1].startswith(\"layer_norm\"):\n continue # output layer norm\n self.assertStartsWith(levels[1], \"layer\")\n if levels[2] == \"mlp\":\n self.assertIn(levels[3], {\"linear_1\", \"linear_2\"})\n elif levels[2] == \"attn\":\n self.assertIn(levels[3], {\"key\", \"query\", \"value\", \"linear\"})\n else:\n self.assertStartsWith(levels[2], \"layer_norm\")\n\n def _zero_mlps(self, params):\n for module in params:\n if \"mlp\" in module:\n for param in params[module]:\n params[module][param] = jnp.zeros_like(params[module][param])\n return params\n\n @parameterized.parameters(dict(layer_norm=True), dict(layer_norm=False))\n def test_layer_norm(self, layer_norm):\n # input = [1, 1, 1, 1]\n # If layer norm is used, this should give all-0 output for a freshly\n # initialized model because LN will subtract the mean after each layer.\n # Else we expect non-zero outputs.\n\n @hk.transform\n def forward(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n 
mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=layer_norm))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = jnp.ones((1, seq_len, 1))\n mask = jnp.ones((1, seq_len))\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if layer_norm:\n np.testing.assert_allclose(out, 0)\n else:\n self.assertFalse(np.allclose(out, 0))\n\n @parameterized.parameters(dict(causal=True), dict(causal=False))\n def test_causal_attention(self, causal):\n # input = [0, random, random, random]\n # mask = [1, 0, 1, 1]\n # For causal attention the second token can only attend to the first one, so\n # it should be the same. For non-causal attention all tokens should change.\n\n @hk.transform\n def forward(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n layer_norm=False,\n causal=causal))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n emb[:, 0, :] = 0\n mask = np.array([[1, 0, 1, 1]])\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n params = self._zero_mlps(params)\n out = forward.apply(params, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n if causal:\n self.assertEqual(0, out[0, 0, 0])\n self.assertEqual(emb[0, 1, 0], out[0, 1, 0])\n else:\n self.assertNotEqual(0, out[0, 0, 0])\n self.assertNotEqual(emb[0, 1, 0], out[0, 1, 0])\n self.assertNotEqual(emb[0, 2, 0], out[0, 2, 0])\n self.assertNotEqual(emb[0, 3, 0], out[0, 3, 0])\n\n def test_setting_activation_function_to_zero(self):\n # An activation function that always returns zeros should result in the\n # same model output as setting all MLP weights to zero.\n\n @hk.transform\n def forward_zero(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jnp.zeros_like))\n return transformer(emb, mask).output\n\n @hk.transform\n def forward(emb, mask):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jax.nn.gelu))\n return transformer(emb, mask).output\n\n seq_len = 4\n emb = np.random.random((1, seq_len, 1))\n mask = np.ones((1, seq_len))\n emb, mask = jnp.array(emb), jnp.array(mask)\n\n rng = hk.PRNGSequence(1)\n params = forward.init(next(rng), emb, mask)\n params_no_mlps = self._zero_mlps(params)\n\n out_zero_activation = forward_zero.apply(params, next(rng), emb, mask)\n out_no_mlps = forward.apply(params_no_mlps, next(rng), emb, mask)\n\n self._check_layer_naming(params)\n np.testing.assert_allclose(out_zero_activation, out_no_mlps)\n self.assertFalse(np.allclose(out_zero_activation, 0))\n\n\nclass CompiledTransformerModelTest(parameterized.TestCase):\n\n def _get_one_hot_embed_unembed(self, vocab_size, max_seq_len):\n # Embeds tokens as one-hot into the first `vocab_size` dimensions\n token_embed = hk.Embed(\n embedding_matrix=jnp.block(\n [jnp.eye(vocab_size),\n jnp.zeros((vocab_size, max_seq_len))]))\n\n # Embeds positions as one-hot into the last `max_seq_len` dimensions\n position_embed = hk.Embed(\n embedding_matrix=jnp.block(\n [jnp.zeros((max_seq_len, vocab_size)),\n 
jnp.eye(max_seq_len)]))\n\n class Unembed(hk.Module):\n\n def __call__(self, embeddings):\n return jnp.argmax(embeddings[:, :, :vocab_size], axis=-1)\n\n return token_embed, position_embed, Unembed()\n\n def test_embedding_gives_desired_result(self):\n tokens = jnp.array([[1, 2, 3]])\n vocab_size, max_seq_len, pad_token = 5, 5, 0\n\n expected_embeddings = jnp.array([[[0, 1, 0, 0, 0, 1, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 1, 0, 0, 0],\n [0, 0, 0, 1, 0, 0, 0, 1, 0, 0]]])\n\n @hk.transform\n def embed(tokens):\n transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jax.nn.gelu))\n token_embed, position_embed, unembed = self._get_one_hot_embed_unembed(\n vocab_size, max_seq_len)\n compiled_model = model.CompiledTransformerModel(\n transformer=transformer,\n token_embed=token_embed,\n position_embed=position_embed,\n unembed=unembed,\n use_unembed_argmax=True,\n pad_token=pad_token)\n return compiled_model.embed(tokens)\n\n rng = hk.PRNGSequence(1)\n params = embed.init(next(rng), tokens)\n embeddings = embed.apply(params, next(rng), tokens)\n\n np.testing.assert_allclose(embeddings, expected_embeddings)\n\n def test_embedding_then_unembedding_gives_same_tokens(self):\n tokens = jnp.array([[1, 2, 3], [4, 5, 6], [3, 2, 4]])\n vocab_size, max_seq_len, pad_token = 10, 5, 0\n\n @hk.transform\n def embed_unembed(tokens):", "metadata": {"task_id": "deepmind--tracr/143", "ground_truth": " transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jax.nn.gelu))\n token_embed, position_embed, unembed = self._get_one_hot_embed_unembed(\n vocab_size, max_seq_len)\n compiled_model = model.CompiledTransformerModel(\n transformer=transformer,\n token_embed=token_embed,\n position_embed=position_embed,\n unembed=unembed,\n use_unembed_argmax=True,\n pad_token=pad_token)\n embeddings = compiled_model.embed(tokens)\n unembeddings = compiled_model.unembed(embeddings)\n return embeddings, unembeddings\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "model_test.py"], "context_start_lineno": 0, "lineno": 239, "function_name": "embed_unembed"}, "groundtruth": " transformer = model.Transformer(\n model.TransformerConfig(\n num_heads=2,\n num_layers=2,\n key_size=5,\n mlp_hidden_size=64,\n dropout_rate=0.,\n causal=False,\n layer_norm=False,\n activation_function=jax.nn.gelu))\n token_embed, position_embed, unembed = self._get_one_hot_embed_unembed(\n vocab_size, max_seq_len)\n compiled_model = model.CompiledTransformerModel(\n transformer=transformer,\n token_embed=token_embed,\n position_embed=position_embed,\n unembed=unembed,\n use_unembed_argmax=True,\n pad_token=pad_token)\n embeddings = compiled_model.embed(tokens)\n unembeddings = compiled_model.unembed(embeddings)\n return embeddings, unembeddings\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Instrumented attention layer (forked from the Haiku library implementation).\n\"\"\"\n\nfrom typing import Optional\nimport warnings\n\nimport chex\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\n\n\n@chex.dataclass\nclass AttentionOutput:\n out: jax.Array # [..., T', D']\n logits: jax.Array # [..., H, T', T]\n\n\nclass MultiHeadAttention(hk.Module):\n \"\"\"Multi-headed attention (MHA) module.\n\n This module is intended for attending over sequences of vectors.\n\n Rough sketch:\n - Compute keys (K), queries (Q), and values (V) as projections of inputs.\n - Attention weights are computed as W = softmax(QK^T / sqrt(key_size)).\n - Output is another projection of WV^T.\n\n For more detail, see the original Transformer paper:\n \"Attention is all you need\" https://arxiv.org/abs/1706.03762.\n\n Glossary of shapes:\n - T: Sequence length.\n - D: Vector (embedding) size.\n - H: Number of attention heads.\n \"\"\"\n\n def __init__(\n self,\n num_heads: int,\n key_size: int,\n # TODO(b/240019186): Remove `w_init_scale`.\n w_init_scale: Optional[float] = None,\n *,\n w_init: Optional[hk.initializers.Initializer] = None,\n value_size: Optional[int] = None,\n model_size: Optional[int] = None,\n name: Optional[str] = None,\n ):\n \"\"\"Initialises the module.\n\n Args:\n num_heads: Number of independent attention heads (H).\n key_size: The size of keys (K) and queries used for attention.\n w_init_scale: DEPRECATED. Please use w_init instead.\n w_init: Initialiser for weights in the linear map.\n value_size: Optional size of the value projection (V). If None, defaults\n to the key size (K).\n model_size: Optional size of the output embedding (D'). 
If None, defaults\n to the key size multiplied by the number of heads (K * H).\n name: Optional name for this module.\n \"\"\"", "metadata": {"task_id": "deepmind--tracr/144", "ground_truth": " super().__init__(name=name)\n self.num_heads = num_heads\n self.key_size = key_size\n self.value_size = value_size or key_size\n self.model_size = model_size or key_size * num_heads\n\n # Backwards-compatibility for w_init_scale.\n if w_init_scale is not None:\n warnings.warn(\n \"w_init_scale is deprecated; please pass an explicit weight \"\n \"initialiser instead.\", DeprecationWarning)\n if w_init and w_init_scale:\n raise ValueError(\"Please provide only `w_init`, not `w_init_scale`.\")\n if w_init is None and w_init_scale is None:\n raise ValueError(\"Please provide a weight initializer: `w_init`.\")\n if w_init is None:\n w_init = hk.initializers.VarianceScaling(w_init_scale)\n self.w_init = w_init\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "attention.py"], "context_start_lineno": 0, "lineno": 77, "function_name": "__init__"}, "groundtruth": " super().__init__(name=name)\n self.num_heads = num_heads\n self.key_size = key_size\n self.value_size = value_size or key_size\n self.model_size = model_size or key_size * num_heads\n\n # Backwards-compatibility for w_init_scale.\n if w_init_scale is not None:\n warnings.warn(\n \"w_init_scale is deprecated; please pass an explicit weight \"\n \"initialiser instead.\", DeprecationWarning)\n if w_init and w_init_scale:\n raise ValueError(\"Please provide only `w_init`, not `w_init_scale`.\")\n if w_init is None and w_init_scale is None:\n raise ValueError(\"Please provide a weight initializer: `w_init`.\")\n if w_init is None:\n w_init = hk.initializers.VarianceScaling(w_init_scale)\n self.w_init = w_init\n"} +{"prompt": "# Copyright 2022 DeepMind Technologies Limited. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Instrumented attention layer (forked from the Haiku library implementation).\n\"\"\"\n\nfrom typing import Optional\nimport warnings\n\nimport chex\nimport haiku as hk\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\n\n\n@chex.dataclass\nclass AttentionOutput:\n out: jax.Array # [..., T', D']\n logits: jax.Array # [..., H, T', T]\n\n\nclass MultiHeadAttention(hk.Module):\n \"\"\"Multi-headed attention (MHA) module.\n\n This module is intended for attending over sequences of vectors.\n\n Rough sketch:\n - Compute keys (K), queries (Q), and values (V) as projections of inputs.\n - Attention weights are computed as W = softmax(QK^T / sqrt(key_size)).\n - Output is another projection of WV^T.\n\n For more detail, see the original Transformer paper:\n \"Attention is all you need\" https://arxiv.org/abs/1706.03762.\n\n Glossary of shapes:\n - T: Sequence length.\n - D: Vector (embedding) size.\n - H: Number of attention heads.\n \"\"\"\n\n def __init__(\n self,\n num_heads: int,\n key_size: int,\n # TODO(b/240019186): Remove `w_init_scale`.\n w_init_scale: Optional[float] = None,\n *,\n w_init: Optional[hk.initializers.Initializer] = None,\n value_size: Optional[int] = None,\n model_size: Optional[int] = None,\n name: Optional[str] = None,\n ):\n \"\"\"Initialises the module.\n\n Args:\n num_heads: Number of independent attention heads (H).\n key_size: The size of keys (K) and queries used for attention.\n w_init_scale: DEPRECATED. Please use w_init instead.\n w_init: Initialiser for weights in the linear map.\n value_size: Optional size of the value projection (V). If None, defaults\n to the key size (K).\n model_size: Optional size of the output embedding (D'). 
If None, defaults\n to the key size multiplied by the number of heads (K * H).\n name: Optional name for this module.\n \"\"\"\n super().__init__(name=name)\n self.num_heads = num_heads\n self.key_size = key_size\n self.value_size = value_size or key_size\n self.model_size = model_size or key_size * num_heads\n\n # Backwards-compatibility for w_init_scale.\n if w_init_scale is not None:\n warnings.warn(\n \"w_init_scale is deprecated; please pass an explicit weight \"\n \"initialiser instead.\", DeprecationWarning)\n if w_init and w_init_scale:\n raise ValueError(\"Please provide only `w_init`, not `w_init_scale`.\")\n if w_init is None and w_init_scale is None:\n raise ValueError(\"Please provide a weight initializer: `w_init`.\")\n if w_init is None:\n w_init = hk.initializers.VarianceScaling(w_init_scale)\n self.w_init = w_init\n\n def __call__(\n self,\n query: jnp.ndarray,\n key: jnp.ndarray,\n value: jnp.ndarray,\n mask: Optional[jnp.ndarray] = None,\n ) -> AttentionOutput:\n \"\"\"Computes (optionally masked) MHA with queries, keys & values.\n\n This module broadcasts over zero or more 'batch-like' leading dimensions.\n\n Args:\n query: Embeddings sequence used to compute queries; shape [..., T', D_q].\n key: Embeddings sequence used to compute keys; shape [..., T, D_k].\n value: Embeddings sequence used to compute values; shape [..., T, D_v].\n mask: Optional mask applied to attention weights; shape [..., H=1, T', T].\n\n Returns:\n A new sequence of embeddings, consisting of a projection of the\n attention-weighted value projections; shape [..., T', D'].\n \"\"\"\n\n # In shape hints below, we suppress the leading dims [...] for brevity.\n # Hence e.g. [A, B] should be read in every case as [..., A, B].\n *leading_dims, sequence_length, _ = query.shape\n projection = self._linear_projection\n\n # Compute key/query/values (overload K/Q/V to denote the respective sizes).\n query_heads = projection(query, self.key_size, \"query\") # [T', H, Q=K]\n key_heads = projection(key, self.key_size, \"key\") # [T, H, K]\n value_heads = projection(value, self.value_size, \"value\") # [T, H, V]\n\n # Compute attention weights.\n attn_logits = jnp.einsum(\"...thd,...Thd->...htT\", query_heads, key_heads)\n attn_logits = attn_logits / np.sqrt(self.key_size).astype(key.dtype)\n if mask is not None:\n if mask.ndim != attn_logits.ndim:\n raise ValueError(\n f\"Mask dimensionality {mask.ndim} must match logits dimensionality \"\n f\"{attn_logits.ndim}.\")\n attn_logits = jnp.where(mask, attn_logits, -1e30)\n attn_weights = jax.nn.softmax(attn_logits) # [H, T', T]\n\n # Weight the values by the attention and flatten the head vectors.\n attn = jnp.einsum(\"...htT,...Thd->...thd\", attn_weights, value_heads)\n attn = jnp.reshape(attn, (*leading_dims, sequence_length, -1)) # [T', H*V]\n\n # Apply another projection to get the final embeddings.\n final_projection = hk.Linear(self.model_size, w_init=self.w_init)\n return AttentionOutput(\n out=final_projection(attn),\n logits=attn_logits,\n )\n\n @hk.transparent\n def _linear_projection(\n self,\n x: jnp.ndarray,\n head_size: int,\n name: Optional[str] = None,\n ) -> jnp.ndarray:", "metadata": {"task_id": "deepmind--tracr/145", "ground_truth": " y = hk.Linear(self.num_heads * head_size, w_init=self.w_init, name=name)(x)\n *leading_dims, _ = x.shape\n return y.reshape((*leading_dims, self.num_heads, head_size))\n", "fpath_tuple": ["deepmind_tracr", "tracr", "transformer", "attention.py"], "context_start_lineno": 0, "lineno": 157, "function_name": 
"_linear_projection"}, "groundtruth": " y = hk.Linear(self.num_heads * head_size, w_init=self.w_init, name=name)(x)\n *leading_dims, _ = x.shape\n return y.reshape((*leading_dims, self.num_heads, head_size))\n"} +{"prompt": "# Copyright Sang Keun Choe\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport sys\nimport abc\n\nimport torch\nimport torch.distributed as dist\n\nfrom betty.patch.data_loader import get_distributed_data_loader\nfrom betty.patch.optimizer import patch_optimizer\nfrom betty.patch.scheduler import patch_scheduler\nfrom betty.configs import Config\nfrom betty.hypergradient import get_grads\nfrom betty.utils import convert_tensor, log_from_loss_dict\n\n\nclass Problem:\n \"\"\"\n This is the base class for an optimization problem in multilevel optimization.\n Specifically, each problem is defined by the parameter (or module), the sets of the upper\n and lower constraining problems, the dataset, the loss function, the optimizer, and other\n optimization configurations (e.g. best-response Jacobian calculation algorithm, number of\n unrolling steps, etc.).\n \"\"\"\n\n def __init__(\n self,\n name,\n config=None,\n module=None,\n optimizer=None,\n scheduler=None,\n train_data_loader=None,\n extra_config=None,\n ):\n # basic configurations\n self._name = name\n self._config = config if config is not None else Config()\n self.cfg = extra_config\n\n # device\n self.device = None\n\n # distributed\n self._strategy = None\n self.accelerator = None\n self._distributed = False\n self._backend = None\n self._world_size = None\n self._rank = None\n self._local_rank = None\n\n # computation graph depedency\n self._parents = []\n self._children = []\n self._paths = []\n\n # data loader\n self.train_data_loader = train_data_loader\n self.train_data_iterator = None\n self.cur_batch = None\n self.epoch_counter = None\n\n # module\n self.module = module\n\n # optimizer & lr scheduler\n self.optimizer = optimizer\n self.scheduler = scheduler\n\n # environment\n self.env = None\n\n # fp16 scaler\n self._fp16 = config.fp16\n self.scaler = None\n if self._fp16:\n self.initial_dynamic_scale = config.initial_dynamic_scale\n self.scale_factor = config.scale_factor\n\n # gradient accumulation\n self.gas = config.gradient_accumulation\n\n # gradient clipping\n self.gradient_clipping = config.gradient_clipping\n\n # warmup\n self.warmup_steps = config.warmup_steps\n\n # logger\n self.logger = None\n self.log_step = config.log_step\n self.log_local_step = config.log_local_step\n\n # step counter\n self._count = 0\n self._global_step = 0\n\n # misc\n self._leaf = False\n self._first_order = False\n self._retain_graph = config.retain_graph\n self._allow_unused = config.allow_unused\n self._unroll_steps = config.unroll_steps\n self._roll_back = False\n self._inner_loop_start = True\n self._training = True\n self.ready = None\n\n def initialize(self):\n \"\"\"\n ``initialize`` patches/sets up module, optimizer, data loader, etc. 
after compiling a\n user-provided configuration (e.g., fp16 training, iterative differentiation)\n \"\"\"\n # initialize update ready to False\n self.ready = [False for _ in range(len(self._children))]\n\n # compile parents configurations\n first_order = []\n for problem in self._parents:\n parent_config = problem.config\n first_order.append(parent_config.first_order)\n self._first_order = all(first_order)\n\n # set inner_loop_start to True\n self._inner_loop_start = True\n\n # accelerate\n if self._strategy == \"accelerate\":\n from accelerate import Accelerator\n\n self.accelerator = Accelerator()\n\n # set up data loader\n if self.is_implemented(\"configure_train_data_loader\"):\n if self.train_data_loader is None:\n self.train_data_loader = self.configure_train_data_loader()\n if self.train_data_loader is not None:\n if not isinstance(self.train_data_loader, tuple):\n self.train_data_loader = (self.train_data_loader,)\n else:\n assert self.is_implemented(\"get_batch\")\n\n # set up module\n if self.is_implemented(\"configure_module\"):\n if self.module is None:\n self.module = self.configure_module()\n assert self.module is not None, \"Module must be specified!\"\n\n # set up optimizer\n if self.is_implemented(\"configure_optimizer\"):\n if self.optimizer is None:\n self.optimizer = self.configure_optimizer()\n\n # set up lr scheduler\n if self.is_implemented(\"configure_scheduler\"):\n if self.scheduler is None:\n self.scheduler = self.configure_scheduler()\n\n # set up fp16 training\n if self._is_default_fp16():\n assert torch.cuda.is_available()\n scaler_cls = torch.cuda.amp.GradScaler\n if self._strategy == \"fsdp\":\n from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler\n\n scaler_cls = ShardedGradScaler\n self.scaler = scaler_cls(\n init_scale=self.initial_dynamic_scale, growth_factor=self.scale_factor\n )\n\n # patch module, optimizer, data loader, and scheduler\n self.patch_everything()\n\n # make train_data_loader as iterator\n if self.train_data_loader is not None:\n self.train_data_iterator = []\n self.epoch_counter = []\n for train_data_loader in self.train_data_loader:\n self.train_data_iterator.append(iter(train_data_loader))\n self.epoch_counter.append(0)\n\n # Logging INFO\n path_str = [[node.name for node in path] for path in self._paths]\n children_str = [node.name for node in self._children]\n parents_str = [node.name for node in self._parents]\n if self.is_rank_zero():\n self.logger.info(\"*** Problem Information ***\")\n self.logger.info(f\"Name: {self._name}\")\n self.logger.info(f\"Uppers: {parents_str}\")\n self.logger.info(f\"Lowers: {children_str}\")\n self.logger.info(f\"Paths: {path_str}\\n\")\n\n def patch_everything(self):\n \"\"\"\n We patch module, optimizer, data loader, and lr scheduler for device placement,\n distributed training, zero optimizer, fsdp, etc.\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/0", "ground_truth": " self.patch_module()\n self.patch_optimizer()\n if self.scheduler is not None:\n self.patch_scheduler()\n if self.train_data_loader is not None:\n self.train_data_loader = [\n self.patch_data_loader(data_loader)\n for data_loader in self.train_data_loader\n ]\n", "fpath_tuple": ["leopard-ai_betty", "betty", "problems", "problem.py"], "context_start_lineno": 0, "lineno": 201, "function_name": "patch_everything"}, "groundtruth": " self.patch_module()\n self.patch_optimizer()\n if self.scheduler is not None:\n self.patch_scheduler()\n if self.train_data_loader is not None:\n self.train_data_loader = [\n 
self.patch_data_loader(data_loader)\n for data_loader in self.train_data_loader\n ]\n"} +{"prompt": "# Copyright Sang Keun Choe\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport sys\nimport abc\n\nimport torch\nimport torch.distributed as dist\n\nfrom betty.patch.data_loader import get_distributed_data_loader\nfrom betty.patch.optimizer import patch_optimizer\nfrom betty.patch.scheduler import patch_scheduler\nfrom betty.configs import Config\nfrom betty.hypergradient import get_grads\nfrom betty.utils import convert_tensor, log_from_loss_dict\n\n\nclass Problem:\n \"\"\"\n This is the base class for an optimization problem in multilevel optimization.\n Specifically, each problem is defined by the parameter (or module), the sets of the upper\n and lower constraining problems, the dataset, the loss function, the optimizer, and other\n optimization configurations (e.g. best-response Jacobian calculation algorithm, number of\n unrolling steps, etc.).\n \"\"\"\n\n def __init__(\n self,\n name,\n config=None,\n module=None,\n optimizer=None,\n scheduler=None,\n train_data_loader=None,\n extra_config=None,\n ):\n # basic configurations\n self._name = name\n self._config = config if config is not None else Config()\n self.cfg = extra_config\n\n # device\n self.device = None\n\n # distributed\n self._strategy = None\n self.accelerator = None\n self._distributed = False\n self._backend = None\n self._world_size = None\n self._rank = None\n self._local_rank = None\n\n # computation graph depedency\n self._parents = []\n self._children = []\n self._paths = []\n\n # data loader\n self.train_data_loader = train_data_loader\n self.train_data_iterator = None\n self.cur_batch = None\n self.epoch_counter = None\n\n # module\n self.module = module\n\n # optimizer & lr scheduler\n self.optimizer = optimizer\n self.scheduler = scheduler\n\n # environment\n self.env = None\n\n # fp16 scaler\n self._fp16 = config.fp16\n self.scaler = None\n if self._fp16:\n self.initial_dynamic_scale = config.initial_dynamic_scale\n self.scale_factor = config.scale_factor\n\n # gradient accumulation\n self.gas = config.gradient_accumulation\n\n # gradient clipping\n self.gradient_clipping = config.gradient_clipping\n\n # warmup\n self.warmup_steps = config.warmup_steps\n\n # logger\n self.logger = None\n self.log_step = config.log_step\n self.log_local_step = config.log_local_step\n\n # step counter\n self._count = 0\n self._global_step = 0\n\n # misc\n self._leaf = False\n self._first_order = False\n self._retain_graph = config.retain_graph\n self._allow_unused = config.allow_unused\n self._unroll_steps = config.unroll_steps\n self._roll_back = False\n self._inner_loop_start = True\n self._training = True\n self.ready = None\n\n def initialize(self):\n \"\"\"\n ``initialize`` patches/sets up module, optimizer, data loader, etc. 
after compiling a\n user-provided configuration (e.g., fp16 training, iterative differentiation)\n \"\"\"\n # initialize update ready to False\n self.ready = [False for _ in range(len(self._children))]\n\n # compile parents configurations\n first_order = []\n for problem in self._parents:\n parent_config = problem.config\n first_order.append(parent_config.first_order)\n self._first_order = all(first_order)\n\n # set inner_loop_start to True\n self._inner_loop_start = True\n\n # accelerate\n if self._strategy == \"accelerate\":\n from accelerate import Accelerator\n\n self.accelerator = Accelerator()\n\n # set up data loader\n if self.is_implemented(\"configure_train_data_loader\"):\n if self.train_data_loader is None:\n self.train_data_loader = self.configure_train_data_loader()\n if self.train_data_loader is not None:\n if not isinstance(self.train_data_loader, tuple):\n self.train_data_loader = (self.train_data_loader,)\n else:\n assert self.is_implemented(\"get_batch\")\n\n # set up module\n if self.is_implemented(\"configure_module\"):\n if self.module is None:\n self.module = self.configure_module()\n assert self.module is not None, \"Module must be specified!\"\n\n # set up optimizer\n if self.is_implemented(\"configure_optimizer\"):\n if self.optimizer is None:\n self.optimizer = self.configure_optimizer()\n\n # set up lr scheduler\n if self.is_implemented(\"configure_scheduler\"):\n if self.scheduler is None:\n self.scheduler = self.configure_scheduler()\n\n # set up fp16 training\n if self._is_default_fp16():\n assert torch.cuda.is_available()\n scaler_cls = torch.cuda.amp.GradScaler\n if self._strategy == \"fsdp\":\n from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler\n\n scaler_cls = ShardedGradScaler\n self.scaler = scaler_cls(\n init_scale=self.initial_dynamic_scale, growth_factor=self.scale_factor\n )\n\n # patch module, optimizer, data loader, and scheduler\n self.patch_everything()\n\n # make train_data_loader as iterator\n if self.train_data_loader is not None:\n self.train_data_iterator = []\n self.epoch_counter = []\n for train_data_loader in self.train_data_loader:\n self.train_data_iterator.append(iter(train_data_loader))\n self.epoch_counter.append(0)\n\n # Logging INFO\n path_str = [[node.name for node in path] for path in self._paths]\n children_str = [node.name for node in self._children]\n parents_str = [node.name for node in self._parents]\n if self.is_rank_zero():\n self.logger.info(\"*** Problem Information ***\")\n self.logger.info(f\"Name: {self._name}\")\n self.logger.info(f\"Uppers: {parents_str}\")\n self.logger.info(f\"Lowers: {children_str}\")\n self.logger.info(f\"Paths: {path_str}\\n\")\n\n def patch_everything(self):\n \"\"\"\n We patch module, optimizer, data loader, and lr scheduler for device placement,\n distributed training, zero optimizer, fsdp, etc.\n \"\"\"\n self.patch_module()\n self.patch_optimizer()\n if self.scheduler is not None:\n self.patch_scheduler()\n if self.train_data_loader is not None:\n self.train_data_loader = [\n self.patch_data_loader(data_loader)\n for data_loader in self.train_data_loader\n ]\n\n def patch_module(self):\n \"\"\"\n Patch module given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.module.to(self.device)\n if self._strategy in [\"distributed\", \"zero\"]:\n self.synchronize_params(self.parameters())\n self.module = torch.nn.parallel.DistributedDataParallel(\n module=self.module,\n gradient_as_bucket_view=True,\n )\n elif self._strategy == \"fsdp\":\n if self.is_rank_zero():\n 
self.logger.warning(\"FSDP requires PyTorch version >= 1.12\")\n from torch.distributed.fsdp import FullyShardedDataParallel as FSDP\n\n self.synchronize_params(self.parameters())\n self.module = FSDP(self.module, device_id=self.device)\n elif self._strategy == \"accelerate\":\n self.module = self.accelerator.prepare(self.module)\n\n def patch_optimizer(self):\n \"\"\"\n Patch optimizer given the systems configuration (e.g., DDP, FSDP)\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/1", "ground_truth": " params = self.trainable_parameters()\n if self.is_implemented(\"param_groups\") and self._strategy != \"fsdp\":\n params = self.param_groups()\n is_zero = True if self._strategy == \"zero\" else False\n if self._strategy == \"accelerate\":\n self.optimizer = self.accelerator.prepare(self.optimizer)\n else:\n self.optimizer = patch_optimizer(self.optimizer, params, is_zero)\n", "fpath_tuple": ["leopard-ai_betty", "betty", "problems", "problem.py"], "context_start_lineno": 0, "lineno": 236, "function_name": "patch_optimizer"}, "groundtruth": " params = self.trainable_parameters()\n if self.is_implemented(\"param_groups\") and self._strategy != \"fsdp\":\n params = self.param_groups()\n is_zero = True if self._strategy == \"zero\" else False\n if self._strategy == \"accelerate\":\n self.optimizer = self.accelerator.prepare(self.optimizer)\n else:\n self.optimizer = patch_optimizer(self.optimizer, params, is_zero)\n"} +{"prompt": "# Copyright Sang Keun Choe\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport sys\nimport abc\n\nimport torch\nimport torch.distributed as dist\n\nfrom betty.patch.data_loader import get_distributed_data_loader\nfrom betty.patch.optimizer import patch_optimizer\nfrom betty.patch.scheduler import patch_scheduler\nfrom betty.configs import Config\nfrom betty.hypergradient import get_grads\nfrom betty.utils import convert_tensor, log_from_loss_dict\n\n\nclass Problem:\n \"\"\"\n This is the base class for an optimization problem in multilevel optimization.\n Specifically, each problem is defined by the parameter (or module), the sets of the upper\n and lower constraining problems, the dataset, the loss function, the optimizer, and other\n optimization configurations (e.g. 
best-response Jacobian calculation algorithm, number of\n unrolling steps, etc.).\n \"\"\"\n\n def __init__(\n self,\n name,\n config=None,\n module=None,\n optimizer=None,\n scheduler=None,\n train_data_loader=None,\n extra_config=None,\n ):\n # basic configurations\n self._name = name\n self._config = config if config is not None else Config()\n self.cfg = extra_config\n\n # device\n self.device = None\n\n # distributed\n self._strategy = None\n self.accelerator = None\n self._distributed = False\n self._backend = None\n self._world_size = None\n self._rank = None\n self._local_rank = None\n\n # computation graph depedency\n self._parents = []\n self._children = []\n self._paths = []\n\n # data loader\n self.train_data_loader = train_data_loader\n self.train_data_iterator = None\n self.cur_batch = None\n self.epoch_counter = None\n\n # module\n self.module = module\n\n # optimizer & lr scheduler\n self.optimizer = optimizer\n self.scheduler = scheduler\n\n # environment\n self.env = None\n\n # fp16 scaler\n self._fp16 = config.fp16\n self.scaler = None\n if self._fp16:\n self.initial_dynamic_scale = config.initial_dynamic_scale\n self.scale_factor = config.scale_factor\n\n # gradient accumulation\n self.gas = config.gradient_accumulation\n\n # gradient clipping\n self.gradient_clipping = config.gradient_clipping\n\n # warmup\n self.warmup_steps = config.warmup_steps\n\n # logger\n self.logger = None\n self.log_step = config.log_step\n self.log_local_step = config.log_local_step\n\n # step counter\n self._count = 0\n self._global_step = 0\n\n # misc\n self._leaf = False\n self._first_order = False\n self._retain_graph = config.retain_graph\n self._allow_unused = config.allow_unused\n self._unroll_steps = config.unroll_steps\n self._roll_back = False\n self._inner_loop_start = True\n self._training = True\n self.ready = None\n\n def initialize(self):\n \"\"\"\n ``initialize`` patches/sets up module, optimizer, data loader, etc. 
after compiling a\n user-provided configuration (e.g., fp16 training, iterative differentiation)\n \"\"\"\n # initialize update ready to False\n self.ready = [False for _ in range(len(self._children))]\n\n # compile parents configurations\n first_order = []\n for problem in self._parents:\n parent_config = problem.config\n first_order.append(parent_config.first_order)\n self._first_order = all(first_order)\n\n # set inner_loop_start to True\n self._inner_loop_start = True\n\n # accelerate\n if self._strategy == \"accelerate\":\n from accelerate import Accelerator\n\n self.accelerator = Accelerator()\n\n # set up data loader\n if self.is_implemented(\"configure_train_data_loader\"):\n if self.train_data_loader is None:\n self.train_data_loader = self.configure_train_data_loader()\n if self.train_data_loader is not None:\n if not isinstance(self.train_data_loader, tuple):\n self.train_data_loader = (self.train_data_loader,)\n else:\n assert self.is_implemented(\"get_batch\")\n\n # set up module\n if self.is_implemented(\"configure_module\"):\n if self.module is None:\n self.module = self.configure_module()\n assert self.module is not None, \"Module must be specified!\"\n\n # set up optimizer\n if self.is_implemented(\"configure_optimizer\"):\n if self.optimizer is None:\n self.optimizer = self.configure_optimizer()\n\n # set up lr scheduler\n if self.is_implemented(\"configure_scheduler\"):\n if self.scheduler is None:\n self.scheduler = self.configure_scheduler()\n\n # set up fp16 training\n if self._is_default_fp16():\n assert torch.cuda.is_available()\n scaler_cls = torch.cuda.amp.GradScaler\n if self._strategy == \"fsdp\":\n from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler\n\n scaler_cls = ShardedGradScaler\n self.scaler = scaler_cls(\n init_scale=self.initial_dynamic_scale, growth_factor=self.scale_factor\n )\n\n # patch module, optimizer, data loader, and scheduler\n self.patch_everything()\n\n # make train_data_loader as iterator\n if self.train_data_loader is not None:\n self.train_data_iterator = []\n self.epoch_counter = []\n for train_data_loader in self.train_data_loader:\n self.train_data_iterator.append(iter(train_data_loader))\n self.epoch_counter.append(0)\n\n # Logging INFO\n path_str = [[node.name for node in path] for path in self._paths]\n children_str = [node.name for node in self._children]\n parents_str = [node.name for node in self._parents]\n if self.is_rank_zero():\n self.logger.info(\"*** Problem Information ***\")\n self.logger.info(f\"Name: {self._name}\")\n self.logger.info(f\"Uppers: {parents_str}\")\n self.logger.info(f\"Lowers: {children_str}\")\n self.logger.info(f\"Paths: {path_str}\\n\")\n\n def patch_everything(self):\n \"\"\"\n We patch module, optimizer, data loader, and lr scheduler for device placement,\n distributed training, zero optimizer, fsdp, etc.\n \"\"\"\n self.patch_module()\n self.patch_optimizer()\n if self.scheduler is not None:\n self.patch_scheduler()\n if self.train_data_loader is not None:\n self.train_data_loader = [\n self.patch_data_loader(data_loader)\n for data_loader in self.train_data_loader\n ]\n\n def patch_module(self):\n \"\"\"\n Patch module given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.module.to(self.device)\n if self._strategy in [\"distributed\", \"zero\"]:\n self.synchronize_params(self.parameters())\n self.module = torch.nn.parallel.DistributedDataParallel(\n module=self.module,\n gradient_as_bucket_view=True,\n )\n elif self._strategy == \"fsdp\":\n if self.is_rank_zero():\n 
self.logger.warning(\"FSDP requires PyTorch version >= 1.12\")\n from torch.distributed.fsdp import FullyShardedDataParallel as FSDP\n\n self.synchronize_params(self.parameters())\n self.module = FSDP(self.module, device_id=self.device)\n elif self._strategy == \"accelerate\":\n self.module = self.accelerator.prepare(self.module)\n\n def patch_optimizer(self):\n \"\"\"\n Patch optimizer given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n params = self.trainable_parameters()\n if self.is_implemented(\"param_groups\") and self._strategy != \"fsdp\":\n params = self.param_groups()\n is_zero = True if self._strategy == \"zero\" else False\n if self._strategy == \"accelerate\":\n self.optimizer = self.accelerator.prepare(self.optimizer)\n else:\n self.optimizer = patch_optimizer(self.optimizer, params, is_zero)\n\n def patch_scheduler(self):\n \"\"\"\n Patch scheduler given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.scheduler = patch_scheduler(self.scheduler, self.optimizer)\n if self._strategy == \"accelerate\":\n self.scheduler = self.accelerator.prepare(self.scheduler)\n\n def patch_data_loader(self, loader):\n \"\"\"\n Patch data loader given the systems configuration (e.g., DDP, FSDP)\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/2", "ground_truth": " if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n patched_loader = get_distributed_data_loader(\n loader, world_size=self._world_size, rank=self._rank\n )\n elif self._strategy == \"accelerate\":\n patched_loader = self.accelerator.prepare(loader)\n else:\n patched_loader = loader\n\n return patched_loader\n", "fpath_tuple": ["leopard-ai_betty", "betty", "problems", "problem.py"], "context_start_lineno": 0, "lineno": 257, "function_name": "patch_data_loader"}, "groundtruth": " if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n patched_loader = get_distributed_data_loader(\n loader, world_size=self._world_size, rank=self._rank\n )\n elif self._strategy == \"accelerate\":\n patched_loader = self.accelerator.prepare(loader)\n else:\n patched_loader = loader\n\n return patched_loader\n"} +{"prompt": "# Copyright Sang Keun Choe\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport sys\nimport abc\n\nimport torch\nimport torch.distributed as dist\n\nfrom betty.patch.data_loader import get_distributed_data_loader\nfrom betty.patch.optimizer import patch_optimizer\nfrom betty.patch.scheduler import patch_scheduler\nfrom betty.configs import Config\nfrom betty.hypergradient import get_grads\nfrom betty.utils import convert_tensor, log_from_loss_dict\n\n\nclass Problem:\n \"\"\"\n This is the base class for an optimization problem in multilevel optimization.\n Specifically, each problem is defined by the parameter (or module), the sets of the upper\n and lower constraining problems, the dataset, the loss function, the optimizer, and other\n optimization configurations (e.g. 
best-response Jacobian calculation algorithm, number of\n unrolling steps, etc.).\n \"\"\"\n\n def __init__(\n self,\n name,\n config=None,\n module=None,\n optimizer=None,\n scheduler=None,\n train_data_loader=None,\n extra_config=None,\n ):\n # basic configurations\n self._name = name\n self._config = config if config is not None else Config()\n self.cfg = extra_config\n\n # device\n self.device = None\n\n # distributed\n self._strategy = None\n self.accelerator = None\n self._distributed = False\n self._backend = None\n self._world_size = None\n self._rank = None\n self._local_rank = None\n\n # computation graph depedency\n self._parents = []\n self._children = []\n self._paths = []\n\n # data loader\n self.train_data_loader = train_data_loader\n self.train_data_iterator = None\n self.cur_batch = None\n self.epoch_counter = None\n\n # module\n self.module = module\n\n # optimizer & lr scheduler\n self.optimizer = optimizer\n self.scheduler = scheduler\n\n # environment\n self.env = None\n\n # fp16 scaler\n self._fp16 = config.fp16\n self.scaler = None\n if self._fp16:\n self.initial_dynamic_scale = config.initial_dynamic_scale\n self.scale_factor = config.scale_factor\n\n # gradient accumulation\n self.gas = config.gradient_accumulation\n\n # gradient clipping\n self.gradient_clipping = config.gradient_clipping\n\n # warmup\n self.warmup_steps = config.warmup_steps\n\n # logger\n self.logger = None\n self.log_step = config.log_step\n self.log_local_step = config.log_local_step\n\n # step counter\n self._count = 0\n self._global_step = 0\n\n # misc\n self._leaf = False\n self._first_order = False\n self._retain_graph = config.retain_graph\n self._allow_unused = config.allow_unused\n self._unroll_steps = config.unroll_steps\n self._roll_back = False\n self._inner_loop_start = True\n self._training = True\n self.ready = None\n\n def initialize(self):\n \"\"\"\n ``initialize`` patches/sets up module, optimizer, data loader, etc. 
after compiling a\n user-provided configuration (e.g., fp16 training, iterative differentiation)\n \"\"\"\n # initialize update ready to False\n self.ready = [False for _ in range(len(self._children))]\n\n # compile parents configurations\n first_order = []\n for problem in self._parents:\n parent_config = problem.config\n first_order.append(parent_config.first_order)\n self._first_order = all(first_order)\n\n # set inner_loop_start to True\n self._inner_loop_start = True\n\n # accelerate\n if self._strategy == \"accelerate\":\n from accelerate import Accelerator\n\n self.accelerator = Accelerator()\n\n # set up data loader\n if self.is_implemented(\"configure_train_data_loader\"):\n if self.train_data_loader is None:\n self.train_data_loader = self.configure_train_data_loader()\n if self.train_data_loader is not None:\n if not isinstance(self.train_data_loader, tuple):\n self.train_data_loader = (self.train_data_loader,)\n else:\n assert self.is_implemented(\"get_batch\")\n\n # set up module\n if self.is_implemented(\"configure_module\"):\n if self.module is None:\n self.module = self.configure_module()\n assert self.module is not None, \"Module must be specified!\"\n\n # set up optimizer\n if self.is_implemented(\"configure_optimizer\"):\n if self.optimizer is None:\n self.optimizer = self.configure_optimizer()\n\n # set up lr scheduler\n if self.is_implemented(\"configure_scheduler\"):\n if self.scheduler is None:\n self.scheduler = self.configure_scheduler()\n\n # set up fp16 training\n if self._is_default_fp16():\n assert torch.cuda.is_available()\n scaler_cls = torch.cuda.amp.GradScaler\n if self._strategy == \"fsdp\":\n from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler\n\n scaler_cls = ShardedGradScaler\n self.scaler = scaler_cls(\n init_scale=self.initial_dynamic_scale, growth_factor=self.scale_factor\n )\n\n # patch module, optimizer, data loader, and scheduler\n self.patch_everything()\n\n # make train_data_loader as iterator\n if self.train_data_loader is not None:\n self.train_data_iterator = []\n self.epoch_counter = []\n for train_data_loader in self.train_data_loader:\n self.train_data_iterator.append(iter(train_data_loader))\n self.epoch_counter.append(0)\n\n # Logging INFO\n path_str = [[node.name for node in path] for path in self._paths]\n children_str = [node.name for node in self._children]\n parents_str = [node.name for node in self._parents]\n if self.is_rank_zero():\n self.logger.info(\"*** Problem Information ***\")\n self.logger.info(f\"Name: {self._name}\")\n self.logger.info(f\"Uppers: {parents_str}\")\n self.logger.info(f\"Lowers: {children_str}\")\n self.logger.info(f\"Paths: {path_str}\\n\")\n\n def patch_everything(self):\n \"\"\"\n We patch module, optimizer, data loader, and lr scheduler for device placement,\n distributed training, zero optimizer, fsdp, etc.\n \"\"\"\n self.patch_module()\n self.patch_optimizer()\n if self.scheduler is not None:\n self.patch_scheduler()\n if self.train_data_loader is not None:\n self.train_data_loader = [\n self.patch_data_loader(data_loader)\n for data_loader in self.train_data_loader\n ]\n\n def patch_module(self):\n \"\"\"\n Patch module given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.module.to(self.device)\n if self._strategy in [\"distributed\", \"zero\"]:\n self.synchronize_params(self.parameters())\n self.module = torch.nn.parallel.DistributedDataParallel(\n module=self.module,\n gradient_as_bucket_view=True,\n )\n elif self._strategy == \"fsdp\":\n if self.is_rank_zero():\n 
self.logger.warning(\"FSDP requires PyTorch version >= 1.12\")\n from torch.distributed.fsdp import FullyShardedDataParallel as FSDP\n\n self.synchronize_params(self.parameters())\n self.module = FSDP(self.module, device_id=self.device)\n elif self._strategy == \"accelerate\":\n self.module = self.accelerator.prepare(self.module)\n\n def patch_optimizer(self):\n \"\"\"\n Patch optimizer given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n params = self.trainable_parameters()\n if self.is_implemented(\"param_groups\") and self._strategy != \"fsdp\":\n params = self.param_groups()\n is_zero = True if self._strategy == \"zero\" else False\n if self._strategy == \"accelerate\":\n self.optimizer = self.accelerator.prepare(self.optimizer)\n else:\n self.optimizer = patch_optimizer(self.optimizer, params, is_zero)\n\n def patch_scheduler(self):\n \"\"\"\n Patch scheduler given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.scheduler = patch_scheduler(self.scheduler, self.optimizer)\n if self._strategy == \"accelerate\":\n self.scheduler = self.accelerator.prepare(self.scheduler)\n\n def patch_data_loader(self, loader):\n \"\"\"\n Patch data loader given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n patched_loader = get_distributed_data_loader(\n loader, world_size=self._world_size, rank=self._rank\n )\n elif self._strategy == \"accelerate\":\n patched_loader = self.accelerator.prepare(loader)\n else:\n patched_loader = loader\n\n return patched_loader\n\n def set_module(self, module):\n \"\"\"\n Set new module for the current Problem class.\n \"\"\"\n self.module = module\n self.patch_module()\n\n def set_optimizer(self, optimizer):\n \"\"\"\n Set new optimizer for the current Problem class.\n \"\"\"\n self.optimizer = optimizer\n self.patch_optimizer()\n\n def set_scheduler(self, scheduler):\n \"\"\"\n Set new scheduler for the current Problem class.\n \"\"\"\n self.scheduler = scheduler\n self.patch_scheduler()\n\n def set_train_data_loader(self, loader, idx=0):\n \"\"\"\n Set new data loader for the current Problem class.\n \"\"\"\n self.train_data_loader[idx] = self.patch_data_loader(loader)\n\n def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n\n def forward(self, *args, **kwargs):\n \"\"\"\n Users define how forward (or call) function is defined for the problem here.\n \"\"\"\n return self.module(*args, **kwargs)\n\n @abc.abstractmethod\n def training_step(self, batch):\n \"\"\"\n Users define the loss function of the problem here.\n \"\"\"\n raise NotImplementedError\n\n def training_step_exec(self, batch):", "metadata": {"task_id": "leopard-ai--betty/3", "ground_truth": " if self._is_default_fp16():\n with torch.cuda.amp.autocast():\n return self.training_step(batch)\n else:\n return self.training_step(batch)\n", "fpath_tuple": ["leopard-ai_betty", "betty", "problems", "problem.py"], "context_start_lineno": 0, "lineno": 312, "function_name": "training_step_exec"}, "groundtruth": " if self._is_default_fp16():\n with torch.cuda.amp.autocast():\n return self.training_step(batch)\n else:\n return self.training_step(batch)\n"} +{"prompt": "# Copyright Sang Keun Choe\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport sys\nimport abc\n\nimport torch\nimport torch.distributed as dist\n\nfrom betty.patch.data_loader import get_distributed_data_loader\nfrom betty.patch.optimizer import 
patch_optimizer\nfrom betty.patch.scheduler import patch_scheduler\nfrom betty.configs import Config\nfrom betty.hypergradient import get_grads\nfrom betty.utils import convert_tensor, log_from_loss_dict\n\n\nclass Problem:\n \"\"\"\n This is the base class for an optimization problem in multilevel optimization.\n Specifically, each problem is defined by the parameter (or module), the sets of the upper\n and lower constraining problems, the dataset, the loss function, the optimizer, and other\n optimization configurations (e.g. best-response Jacobian calculation algorithm, number of\n unrolling steps, etc.).\n \"\"\"\n\n def __init__(\n self,\n name,\n config=None,\n module=None,\n optimizer=None,\n scheduler=None,\n train_data_loader=None,\n extra_config=None,\n ):\n # basic configurations\n self._name = name\n self._config = config if config is not None else Config()\n self.cfg = extra_config\n\n # device\n self.device = None\n\n # distributed\n self._strategy = None\n self.accelerator = None\n self._distributed = False\n self._backend = None\n self._world_size = None\n self._rank = None\n self._local_rank = None\n\n # computation graph depedency\n self._parents = []\n self._children = []\n self._paths = []\n\n # data loader\n self.train_data_loader = train_data_loader\n self.train_data_iterator = None\n self.cur_batch = None\n self.epoch_counter = None\n\n # module\n self.module = module\n\n # optimizer & lr scheduler\n self.optimizer = optimizer\n self.scheduler = scheduler\n\n # environment\n self.env = None\n\n # fp16 scaler\n self._fp16 = config.fp16\n self.scaler = None\n if self._fp16:\n self.initial_dynamic_scale = config.initial_dynamic_scale\n self.scale_factor = config.scale_factor\n\n # gradient accumulation\n self.gas = config.gradient_accumulation\n\n # gradient clipping\n self.gradient_clipping = config.gradient_clipping\n\n # warmup\n self.warmup_steps = config.warmup_steps\n\n # logger\n self.logger = None\n self.log_step = config.log_step\n self.log_local_step = config.log_local_step\n\n # step counter\n self._count = 0\n self._global_step = 0\n\n # misc\n self._leaf = False\n self._first_order = False\n self._retain_graph = config.retain_graph\n self._allow_unused = config.allow_unused\n self._unroll_steps = config.unroll_steps\n self._roll_back = False\n self._inner_loop_start = True\n self._training = True\n self.ready = None\n\n def initialize(self):\n \"\"\"\n ``initialize`` patches/sets up module, optimizer, data loader, etc. 
after compiling a\n user-provided configuration (e.g., fp16 training, iterative differentiation)\n \"\"\"\n # initialize update ready to False\n self.ready = [False for _ in range(len(self._children))]\n\n # compile parents configurations\n first_order = []\n for problem in self._parents:\n parent_config = problem.config\n first_order.append(parent_config.first_order)\n self._first_order = all(first_order)\n\n # set inner_loop_start to True\n self._inner_loop_start = True\n\n # accelerate\n if self._strategy == \"accelerate\":\n from accelerate import Accelerator\n\n self.accelerator = Accelerator()\n\n # set up data loader\n if self.is_implemented(\"configure_train_data_loader\"):\n if self.train_data_loader is None:\n self.train_data_loader = self.configure_train_data_loader()\n if self.train_data_loader is not None:\n if not isinstance(self.train_data_loader, tuple):\n self.train_data_loader = (self.train_data_loader,)\n else:\n assert self.is_implemented(\"get_batch\")\n\n # set up module\n if self.is_implemented(\"configure_module\"):\n if self.module is None:\n self.module = self.configure_module()\n assert self.module is not None, \"Module must be specified!\"\n\n # set up optimizer\n if self.is_implemented(\"configure_optimizer\"):\n if self.optimizer is None:\n self.optimizer = self.configure_optimizer()\n\n # set up lr scheduler\n if self.is_implemented(\"configure_scheduler\"):\n if self.scheduler is None:\n self.scheduler = self.configure_scheduler()\n\n # set up fp16 training\n if self._is_default_fp16():\n assert torch.cuda.is_available()\n scaler_cls = torch.cuda.amp.GradScaler\n if self._strategy == \"fsdp\":\n from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler\n\n scaler_cls = ShardedGradScaler\n self.scaler = scaler_cls(\n init_scale=self.initial_dynamic_scale, growth_factor=self.scale_factor\n )\n\n # patch module, optimizer, data loader, and scheduler\n self.patch_everything()\n\n # make train_data_loader as iterator\n if self.train_data_loader is not None:\n self.train_data_iterator = []\n self.epoch_counter = []\n for train_data_loader in self.train_data_loader:\n self.train_data_iterator.append(iter(train_data_loader))\n self.epoch_counter.append(0)\n\n # Logging INFO\n path_str = [[node.name for node in path] for path in self._paths]\n children_str = [node.name for node in self._children]\n parents_str = [node.name for node in self._parents]\n if self.is_rank_zero():\n self.logger.info(\"*** Problem Information ***\")\n self.logger.info(f\"Name: {self._name}\")\n self.logger.info(f\"Uppers: {parents_str}\")\n self.logger.info(f\"Lowers: {children_str}\")\n self.logger.info(f\"Paths: {path_str}\\n\")\n\n def patch_everything(self):\n \"\"\"\n We patch module, optimizer, data loader, and lr scheduler for device placement,\n distributed training, zero optimizer, fsdp, etc.\n \"\"\"\n self.patch_module()\n self.patch_optimizer()\n if self.scheduler is not None:\n self.patch_scheduler()\n if self.train_data_loader is not None:\n self.train_data_loader = [\n self.patch_data_loader(data_loader)\n for data_loader in self.train_data_loader\n ]\n\n def patch_module(self):\n \"\"\"\n Patch module given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.module.to(self.device)\n if self._strategy in [\"distributed\", \"zero\"]:\n self.synchronize_params(self.parameters())\n self.module = torch.nn.parallel.DistributedDataParallel(\n module=self.module,\n gradient_as_bucket_view=True,\n )\n elif self._strategy == \"fsdp\":\n if self.is_rank_zero():\n 
self.logger.warning(\"FSDP requires PyTorch version >= 1.12\")\n from torch.distributed.fsdp import FullyShardedDataParallel as FSDP\n\n self.synchronize_params(self.parameters())\n self.module = FSDP(self.module, device_id=self.device)\n elif self._strategy == \"accelerate\":\n self.module = self.accelerator.prepare(self.module)\n\n def patch_optimizer(self):\n \"\"\"\n Patch optimizer given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n params = self.trainable_parameters()\n if self.is_implemented(\"param_groups\") and self._strategy != \"fsdp\":\n params = self.param_groups()\n is_zero = True if self._strategy == \"zero\" else False\n if self._strategy == \"accelerate\":\n self.optimizer = self.accelerator.prepare(self.optimizer)\n else:\n self.optimizer = patch_optimizer(self.optimizer, params, is_zero)\n\n def patch_scheduler(self):\n \"\"\"\n Patch scheduler given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.scheduler = patch_scheduler(self.scheduler, self.optimizer)\n if self._strategy == \"accelerate\":\n self.scheduler = self.accelerator.prepare(self.scheduler)\n\n def patch_data_loader(self, loader):\n \"\"\"\n Patch data loader given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n patched_loader = get_distributed_data_loader(\n loader, world_size=self._world_size, rank=self._rank\n )\n elif self._strategy == \"accelerate\":\n patched_loader = self.accelerator.prepare(loader)\n else:\n patched_loader = loader\n\n return patched_loader\n\n def set_module(self, module):\n \"\"\"\n Set new module for the current Problem class.\n \"\"\"\n self.module = module\n self.patch_module()\n\n def set_optimizer(self, optimizer):\n \"\"\"\n Set new optimizer for the current Problem class.\n \"\"\"\n self.optimizer = optimizer\n self.patch_optimizer()\n\n def set_scheduler(self, scheduler):\n \"\"\"\n Set new scheduler for the current Problem class.\n \"\"\"\n self.scheduler = scheduler\n self.patch_scheduler()\n\n def set_train_data_loader(self, loader, idx=0):\n \"\"\"\n Set new data loader for the current Problem class.\n \"\"\"\n self.train_data_loader[idx] = self.patch_data_loader(loader)\n\n def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n\n def forward(self, *args, **kwargs):\n \"\"\"\n Users define how forward (or call) function is defined for the problem here.\n \"\"\"\n return self.module(*args, **kwargs)\n\n @abc.abstractmethod\n def training_step(self, batch):\n \"\"\"\n Users define the loss function of the problem here.\n \"\"\"\n raise NotImplementedError\n\n def training_step_exec(self, batch):\n if self._is_default_fp16():\n with torch.cuda.amp.autocast():\n return self.training_step(batch)\n else:\n return self.training_step(batch)\n\n def one_step_descent(self, batch=None):\n # load data\n if batch is None:\n self.cur_batch = self.get_batch()\n batch = self.cur_batch\n\n # calculate loss\n loss, loss_dict = self.get_loss(batch)\n\n # calculate gradient (a.k.a backward)\n self.backward(\n loss=loss,\n params=self.trainable_parameters(),\n paths=self._paths,\n create_graph=not self._first_order,\n retain_graph=self._retain_graph,\n allow_unused=self._allow_unused,\n )\n if self.is_implemented(\"grad_callback\"):\n self.grad_callback()\n\n # calculate parameter update\n if self._count % self.gas == 0:\n self.optimizer_step()\n\n # param callback (e.g., parameter clipping)\n if self.is_implemented(\"param_callback\"):\n self.param_callback()\n\n if 
self._strategy != \"default\" and self._count % (self.gas * 20) == 0:\n self.synchronize_params(self.trainable_parameters())\n\n # zero-out grad\n self.zero_grad()\n\n return loss_dict\n\n def step_normal(self, global_step=None):\n if self.check_ready():\n # loop start\n if self._inner_loop_start:\n if self.is_implemented(\"on_inner_loop_start\"):\n self.on_inner_loop_start()\n self._inner_loop_start = False\n\n # copy current parameters, buffers, optimizer states\n if self._roll_back:\n self.cache_states()\n\n # increase count (local step)\n if self._training:\n self._count += 1\n\n # one step grdient descent\n loss_dict = self.one_step_descent()\n\n # lr scheduler step\n if self.scheduler is not None and not self._roll_back:\n self.scheduler.step()\n\n # logging\n if (\n self.log_step > 0\n and self._count % self.log_step == 0\n and self.is_rank_zero()\n ):\n self.log(loss_dict, global_step)\n\n # call parent step_normal after unrolling\n if (\n self._training\n and self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_normal(global_step=global_step)\n\n self._inner_loop_start = True\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step_after_roll_back(self):\n if self.check_ready() and self._training:\n if self._roll_back:\n # recover from cached states\n self.recover_states()\n\n # one step gradient step\n _ = self.one_step_descent(batch=self.cur_batch)\n\n # lr scheduler\n if self.scheduler is not None:\n self.scheduler.step()\n\n # call parent step_after_roll_back\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_after_roll_back()\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step(self, global_step=None):\n \"\"\"\n ``step`` method abstracts a one-step gradient descent update with four sub-steps:\n 1) data loading, 2) cost calculation, 3) gradient calculation, and 4) parameter update.\n It also calls upper-level problems' step methods after unrolling gradient steps based on\n the hierarchical dependency graph.\n\n :param global_step: global step of the whole multilevel optimization. Defaults to None.\n :type global_step: int, optional\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/4", "ground_truth": " self._global_step = global_step\n self.step_normal(global_step=global_step)\n if (\n self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n self.step_after_roll_back()\n", "fpath_tuple": ["leopard-ai_betty", "betty", "problems", "problem.py"], "context_start_lineno": 0, "lineno": 432, "function_name": "step"}, "groundtruth": " self._global_step = global_step\n self.step_normal(global_step=global_step)\n if (\n self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n self.step_after_roll_back()\n"} +{"prompt": ".scheduler import patch_scheduler\nfrom betty.configs import Config\nfrom betty.hypergradient import get_grads\nfrom betty.utils import convert_tensor, log_from_loss_dict\n\n\nclass Problem:\n \"\"\"\n This is the base class for an optimization problem in multilevel optimization.\n Specifically, each problem is defined by the parameter (or module), the sets of the upper\n and lower constraining problems, the dataset, the loss function, the optimizer, and other\n optimization configurations (e.g. 
best-response Jacobian calculation algorithm, number of\n unrolling steps, etc.).\n \"\"\"\n\n def __init__(\n self,\n name,\n config=None,\n module=None,\n optimizer=None,\n scheduler=None,\n train_data_loader=None,\n extra_config=None,\n ):\n # basic configurations\n self._name = name\n self._config = config if config is not None else Config()\n self.cfg = extra_config\n\n # device\n self.device = None\n\n # distributed\n self._strategy = None\n self.accelerator = None\n self._distributed = False\n self._backend = None\n self._world_size = None\n self._rank = None\n self._local_rank = None\n\n # computation graph depedency\n self._parents = []\n self._children = []\n self._paths = []\n\n # data loader\n self.train_data_loader = train_data_loader\n self.train_data_iterator = None\n self.cur_batch = None\n self.epoch_counter = None\n\n # module\n self.module = module\n\n # optimizer & lr scheduler\n self.optimizer = optimizer\n self.scheduler = scheduler\n\n # environment\n self.env = None\n\n # fp16 scaler\n self._fp16 = config.fp16\n self.scaler = None\n if self._fp16:\n self.initial_dynamic_scale = config.initial_dynamic_scale\n self.scale_factor = config.scale_factor\n\n # gradient accumulation\n self.gas = config.gradient_accumulation\n\n # gradient clipping\n self.gradient_clipping = config.gradient_clipping\n\n # warmup\n self.warmup_steps = config.warmup_steps\n\n # logger\n self.logger = None\n self.log_step = config.log_step\n self.log_local_step = config.log_local_step\n\n # step counter\n self._count = 0\n self._global_step = 0\n\n # misc\n self._leaf = False\n self._first_order = False\n self._retain_graph = config.retain_graph\n self._allow_unused = config.allow_unused\n self._unroll_steps = config.unroll_steps\n self._roll_back = False\n self._inner_loop_start = True\n self._training = True\n self.ready = None\n\n def initialize(self):\n \"\"\"\n ``initialize`` patches/sets up module, optimizer, data loader, etc. 
after compiling a\n user-provided configuration (e.g., fp16 training, iterative differentiation)\n \"\"\"\n # initialize update ready to False\n self.ready = [False for _ in range(len(self._children))]\n\n # compile parents configurations\n first_order = []\n for problem in self._parents:\n parent_config = problem.config\n first_order.append(parent_config.first_order)\n self._first_order = all(first_order)\n\n # set inner_loop_start to True\n self._inner_loop_start = True\n\n # accelerate\n if self._strategy == \"accelerate\":\n from accelerate import Accelerator\n\n self.accelerator = Accelerator()\n\n # set up data loader\n if self.is_implemented(\"configure_train_data_loader\"):\n if self.train_data_loader is None:\n self.train_data_loader = self.configure_train_data_loader()\n if self.train_data_loader is not None:\n if not isinstance(self.train_data_loader, tuple):\n self.train_data_loader = (self.train_data_loader,)\n else:\n assert self.is_implemented(\"get_batch\")\n\n # set up module\n if self.is_implemented(\"configure_module\"):\n if self.module is None:\n self.module = self.configure_module()\n assert self.module is not None, \"Module must be specified!\"\n\n # set up optimizer\n if self.is_implemented(\"configure_optimizer\"):\n if self.optimizer is None:\n self.optimizer = self.configure_optimizer()\n\n # set up lr scheduler\n if self.is_implemented(\"configure_scheduler\"):\n if self.scheduler is None:\n self.scheduler = self.configure_scheduler()\n\n # set up fp16 training\n if self._is_default_fp16():\n assert torch.cuda.is_available()\n scaler_cls = torch.cuda.amp.GradScaler\n if self._strategy == \"fsdp\":\n from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler\n\n scaler_cls = ShardedGradScaler\n self.scaler = scaler_cls(\n init_scale=self.initial_dynamic_scale, growth_factor=self.scale_factor\n )\n\n # patch module, optimizer, data loader, and scheduler\n self.patch_everything()\n\n # make train_data_loader as iterator\n if self.train_data_loader is not None:\n self.train_data_iterator = []\n self.epoch_counter = []\n for train_data_loader in self.train_data_loader:\n self.train_data_iterator.append(iter(train_data_loader))\n self.epoch_counter.append(0)\n\n # Logging INFO\n path_str = [[node.name for node in path] for path in self._paths]\n children_str = [node.name for node in self._children]\n parents_str = [node.name for node in self._parents]\n if self.is_rank_zero():\n self.logger.info(\"*** Problem Information ***\")\n self.logger.info(f\"Name: {self._name}\")\n self.logger.info(f\"Uppers: {parents_str}\")\n self.logger.info(f\"Lowers: {children_str}\")\n self.logger.info(f\"Paths: {path_str}\\n\")\n\n def patch_everything(self):\n \"\"\"\n We patch module, optimizer, data loader, and lr scheduler for device placement,\n distributed training, zero optimizer, fsdp, etc.\n \"\"\"\n self.patch_module()\n self.patch_optimizer()\n if self.scheduler is not None:\n self.patch_scheduler()\n if self.train_data_loader is not None:\n self.train_data_loader = [\n self.patch_data_loader(data_loader)\n for data_loader in self.train_data_loader\n ]\n\n def patch_module(self):\n \"\"\"\n Patch module given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.module.to(self.device)\n if self._strategy in [\"distributed\", \"zero\"]:\n self.synchronize_params(self.parameters())\n self.module = torch.nn.parallel.DistributedDataParallel(\n module=self.module,\n gradient_as_bucket_view=True,\n )\n elif self._strategy == \"fsdp\":\n if self.is_rank_zero():\n 
self.logger.warning(\"FSDP requires PyTorch version >= 1.12\")\n from torch.distributed.fsdp import FullyShardedDataParallel as FSDP\n\n self.synchronize_params(self.parameters())\n self.module = FSDP(self.module, device_id=self.device)\n elif self._strategy == \"accelerate\":\n self.module = self.accelerator.prepare(self.module)\n\n def patch_optimizer(self):\n \"\"\"\n Patch optimizer given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n params = self.trainable_parameters()\n if self.is_implemented(\"param_groups\") and self._strategy != \"fsdp\":\n params = self.param_groups()\n is_zero = True if self._strategy == \"zero\" else False\n if self._strategy == \"accelerate\":\n self.optimizer = self.accelerator.prepare(self.optimizer)\n else:\n self.optimizer = patch_optimizer(self.optimizer, params, is_zero)\n\n def patch_scheduler(self):\n \"\"\"\n Patch scheduler given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.scheduler = patch_scheduler(self.scheduler, self.optimizer)\n if self._strategy == \"accelerate\":\n self.scheduler = self.accelerator.prepare(self.scheduler)\n\n def patch_data_loader(self, loader):\n \"\"\"\n Patch data loader given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n patched_loader = get_distributed_data_loader(\n loader, world_size=self._world_size, rank=self._rank\n )\n elif self._strategy == \"accelerate\":\n patched_loader = self.accelerator.prepare(loader)\n else:\n patched_loader = loader\n\n return patched_loader\n\n def set_module(self, module):\n \"\"\"\n Set new module for the current Problem class.\n \"\"\"\n self.module = module\n self.patch_module()\n\n def set_optimizer(self, optimizer):\n \"\"\"\n Set new optimizer for the current Problem class.\n \"\"\"\n self.optimizer = optimizer\n self.patch_optimizer()\n\n def set_scheduler(self, scheduler):\n \"\"\"\n Set new scheduler for the current Problem class.\n \"\"\"\n self.scheduler = scheduler\n self.patch_scheduler()\n\n def set_train_data_loader(self, loader, idx=0):\n \"\"\"\n Set new data loader for the current Problem class.\n \"\"\"\n self.train_data_loader[idx] = self.patch_data_loader(loader)\n\n def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n\n def forward(self, *args, **kwargs):\n \"\"\"\n Users define how forward (or call) function is defined for the problem here.\n \"\"\"\n return self.module(*args, **kwargs)\n\n @abc.abstractmethod\n def training_step(self, batch):\n \"\"\"\n Users define the loss function of the problem here.\n \"\"\"\n raise NotImplementedError\n\n def training_step_exec(self, batch):\n if self._is_default_fp16():\n with torch.cuda.amp.autocast():\n return self.training_step(batch)\n else:\n return self.training_step(batch)\n\n def one_step_descent(self, batch=None):\n # load data\n if batch is None:\n self.cur_batch = self.get_batch()\n batch = self.cur_batch\n\n # calculate loss\n loss, loss_dict = self.get_loss(batch)\n\n # calculate gradient (a.k.a backward)\n self.backward(\n loss=loss,\n params=self.trainable_parameters(),\n paths=self._paths,\n create_graph=not self._first_order,\n retain_graph=self._retain_graph,\n allow_unused=self._allow_unused,\n )\n if self.is_implemented(\"grad_callback\"):\n self.grad_callback()\n\n # calculate parameter update\n if self._count % self.gas == 0:\n self.optimizer_step()\n\n # param callback (e.g., parameter clipping)\n if self.is_implemented(\"param_callback\"):\n self.param_callback()\n\n if 
self._strategy != \"default\" and self._count % (self.gas * 20) == 0:\n self.synchronize_params(self.trainable_parameters())\n\n # zero-out grad\n self.zero_grad()\n\n return loss_dict\n\n def step_normal(self, global_step=None):\n if self.check_ready():\n # loop start\n if self._inner_loop_start:\n if self.is_implemented(\"on_inner_loop_start\"):\n self.on_inner_loop_start()\n self._inner_loop_start = False\n\n # copy current parameters, buffers, optimizer states\n if self._roll_back:\n self.cache_states()\n\n # increase count (local step)\n if self._training:\n self._count += 1\n\n # one step grdient descent\n loss_dict = self.one_step_descent()\n\n # lr scheduler step\n if self.scheduler is not None and not self._roll_back:\n self.scheduler.step()\n\n # logging\n if (\n self.log_step > 0\n and self._count % self.log_step == 0\n and self.is_rank_zero()\n ):\n self.log(loss_dict, global_step)\n\n # call parent step_normal after unrolling\n if (\n self._training\n and self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_normal(global_step=global_step)\n\n self._inner_loop_start = True\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step_after_roll_back(self):\n if self.check_ready() and self._training:\n if self._roll_back:\n # recover from cached states\n self.recover_states()\n\n # one step gradient step\n _ = self.one_step_descent(batch=self.cur_batch)\n\n # lr scheduler\n if self.scheduler is not None:\n self.scheduler.step()\n\n # call parent step_after_roll_back\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_after_roll_back()\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step(self, global_step=None):\n \"\"\"\n ``step`` method abstracts a one-step gradient descent update with four sub-steps:\n 1) data loading, 2) cost calculation, 3) gradient calculation, and 4) parameter update.\n It also calls upper-level problems' step methods after unrolling gradient steps based on\n the hierarchical dependency graph.\n\n :param global_step: global step of the whole multilevel optimization. Defaults to None.\n :type global_step: int, optional\n \"\"\"\n self._global_step = global_step\n self.step_normal(global_step=global_step)\n if (\n self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n self.step_after_roll_back()\n\n def get_batch(self):\n \"\"\"\n Load training batch from the user-provided data loader\n\n :return: New training batch\n :rtype: Any\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/5", "ground_truth": " batch = tuple(\n self.get_batch_single_loader(i) for i in range(len(self.train_data_loader))\n )\n\n return batch[0] if len(batch) == 1 else batch\n", "fpath_tuple": ["leopard-ai_betty", "betty", "problems", "problem.py"], "context_start_lineno": 13, "lineno": 447, "function_name": "get_batch"}, "groundtruth": " batch = tuple(\n self.get_batch_single_loader(i) for i in range(len(self.train_data_loader))\n )\n\n return batch[0] if len(batch) == 1 else batch\n"} +{"prompt": " function, the optimizer, and other\n optimization configurations (e.g. 
best-response Jacobian calculation algorithm, number of\n unrolling steps, etc.).\n \"\"\"\n\n def __init__(\n self,\n name,\n config=None,\n module=None,\n optimizer=None,\n scheduler=None,\n train_data_loader=None,\n extra_config=None,\n ):\n # basic configurations\n self._name = name\n self._config = config if config is not None else Config()\n self.cfg = extra_config\n\n # device\n self.device = None\n\n # distributed\n self._strategy = None\n self.accelerator = None\n self._distributed = False\n self._backend = None\n self._world_size = None\n self._rank = None\n self._local_rank = None\n\n # computation graph depedency\n self._parents = []\n self._children = []\n self._paths = []\n\n # data loader\n self.train_data_loader = train_data_loader\n self.train_data_iterator = None\n self.cur_batch = None\n self.epoch_counter = None\n\n # module\n self.module = module\n\n # optimizer & lr scheduler\n self.optimizer = optimizer\n self.scheduler = scheduler\n\n # environment\n self.env = None\n\n # fp16 scaler\n self._fp16 = config.fp16\n self.scaler = None\n if self._fp16:\n self.initial_dynamic_scale = config.initial_dynamic_scale\n self.scale_factor = config.scale_factor\n\n # gradient accumulation\n self.gas = config.gradient_accumulation\n\n # gradient clipping\n self.gradient_clipping = config.gradient_clipping\n\n # warmup\n self.warmup_steps = config.warmup_steps\n\n # logger\n self.logger = None\n self.log_step = config.log_step\n self.log_local_step = config.log_local_step\n\n # step counter\n self._count = 0\n self._global_step = 0\n\n # misc\n self._leaf = False\n self._first_order = False\n self._retain_graph = config.retain_graph\n self._allow_unused = config.allow_unused\n self._unroll_steps = config.unroll_steps\n self._roll_back = False\n self._inner_loop_start = True\n self._training = True\n self.ready = None\n\n def initialize(self):\n \"\"\"\n ``initialize`` patches/sets up module, optimizer, data loader, etc. 
after compiling a\n user-provided configuration (e.g., fp16 training, iterative differentiation)\n \"\"\"\n # initialize update ready to False\n self.ready = [False for _ in range(len(self._children))]\n\n # compile parents configurations\n first_order = []\n for problem in self._parents:\n parent_config = problem.config\n first_order.append(parent_config.first_order)\n self._first_order = all(first_order)\n\n # set inner_loop_start to True\n self._inner_loop_start = True\n\n # accelerate\n if self._strategy == \"accelerate\":\n from accelerate import Accelerator\n\n self.accelerator = Accelerator()\n\n # set up data loader\n if self.is_implemented(\"configure_train_data_loader\"):\n if self.train_data_loader is None:\n self.train_data_loader = self.configure_train_data_loader()\n if self.train_data_loader is not None:\n if not isinstance(self.train_data_loader, tuple):\n self.train_data_loader = (self.train_data_loader,)\n else:\n assert self.is_implemented(\"get_batch\")\n\n # set up module\n if self.is_implemented(\"configure_module\"):\n if self.module is None:\n self.module = self.configure_module()\n assert self.module is not None, \"Module must be specified!\"\n\n # set up optimizer\n if self.is_implemented(\"configure_optimizer\"):\n if self.optimizer is None:\n self.optimizer = self.configure_optimizer()\n\n # set up lr scheduler\n if self.is_implemented(\"configure_scheduler\"):\n if self.scheduler is None:\n self.scheduler = self.configure_scheduler()\n\n # set up fp16 training\n if self._is_default_fp16():\n assert torch.cuda.is_available()\n scaler_cls = torch.cuda.amp.GradScaler\n if self._strategy == \"fsdp\":\n from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler\n\n scaler_cls = ShardedGradScaler\n self.scaler = scaler_cls(\n init_scale=self.initial_dynamic_scale, growth_factor=self.scale_factor\n )\n\n # patch module, optimizer, data loader, and scheduler\n self.patch_everything()\n\n # make train_data_loader as iterator\n if self.train_data_loader is not None:\n self.train_data_iterator = []\n self.epoch_counter = []\n for train_data_loader in self.train_data_loader:\n self.train_data_iterator.append(iter(train_data_loader))\n self.epoch_counter.append(0)\n\n # Logging INFO\n path_str = [[node.name for node in path] for path in self._paths]\n children_str = [node.name for node in self._children]\n parents_str = [node.name for node in self._parents]\n if self.is_rank_zero():\n self.logger.info(\"*** Problem Information ***\")\n self.logger.info(f\"Name: {self._name}\")\n self.logger.info(f\"Uppers: {parents_str}\")\n self.logger.info(f\"Lowers: {children_str}\")\n self.logger.info(f\"Paths: {path_str}\\n\")\n\n def patch_everything(self):\n \"\"\"\n We patch module, optimizer, data loader, and lr scheduler for device placement,\n distributed training, zero optimizer, fsdp, etc.\n \"\"\"\n self.patch_module()\n self.patch_optimizer()\n if self.scheduler is not None:\n self.patch_scheduler()\n if self.train_data_loader is not None:\n self.train_data_loader = [\n self.patch_data_loader(data_loader)\n for data_loader in self.train_data_loader\n ]\n\n def patch_module(self):\n \"\"\"\n Patch module given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.module.to(self.device)\n if self._strategy in [\"distributed\", \"zero\"]:\n self.synchronize_params(self.parameters())\n self.module = torch.nn.parallel.DistributedDataParallel(\n module=self.module,\n gradient_as_bucket_view=True,\n )\n elif self._strategy == \"fsdp\":\n if self.is_rank_zero():\n 
self.logger.warning(\"FSDP requires PyTorch version >= 1.12\")\n from torch.distributed.fsdp import FullyShardedDataParallel as FSDP\n\n self.synchronize_params(self.parameters())\n self.module = FSDP(self.module, device_id=self.device)\n elif self._strategy == \"accelerate\":\n self.module = self.accelerator.prepare(self.module)\n\n def patch_optimizer(self):\n \"\"\"\n Patch optimizer given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n params = self.trainable_parameters()\n if self.is_implemented(\"param_groups\") and self._strategy != \"fsdp\":\n params = self.param_groups()\n is_zero = True if self._strategy == \"zero\" else False\n if self._strategy == \"accelerate\":\n self.optimizer = self.accelerator.prepare(self.optimizer)\n else:\n self.optimizer = patch_optimizer(self.optimizer, params, is_zero)\n\n def patch_scheduler(self):\n \"\"\"\n Patch scheduler given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.scheduler = patch_scheduler(self.scheduler, self.optimizer)\n if self._strategy == \"accelerate\":\n self.scheduler = self.accelerator.prepare(self.scheduler)\n\n def patch_data_loader(self, loader):\n \"\"\"\n Patch data loader given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n patched_loader = get_distributed_data_loader(\n loader, world_size=self._world_size, rank=self._rank\n )\n elif self._strategy == \"accelerate\":\n patched_loader = self.accelerator.prepare(loader)\n else:\n patched_loader = loader\n\n return patched_loader\n\n def set_module(self, module):\n \"\"\"\n Set new module for the current Problem class.\n \"\"\"\n self.module = module\n self.patch_module()\n\n def set_optimizer(self, optimizer):\n \"\"\"\n Set new optimizer for the current Problem class.\n \"\"\"\n self.optimizer = optimizer\n self.patch_optimizer()\n\n def set_scheduler(self, scheduler):\n \"\"\"\n Set new scheduler for the current Problem class.\n \"\"\"\n self.scheduler = scheduler\n self.patch_scheduler()\n\n def set_train_data_loader(self, loader, idx=0):\n \"\"\"\n Set new data loader for the current Problem class.\n \"\"\"\n self.train_data_loader[idx] = self.patch_data_loader(loader)\n\n def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n\n def forward(self, *args, **kwargs):\n \"\"\"\n Users define how forward (or call) function is defined for the problem here.\n \"\"\"\n return self.module(*args, **kwargs)\n\n @abc.abstractmethod\n def training_step(self, batch):\n \"\"\"\n Users define the loss function of the problem here.\n \"\"\"\n raise NotImplementedError\n\n def training_step_exec(self, batch):\n if self._is_default_fp16():\n with torch.cuda.amp.autocast():\n return self.training_step(batch)\n else:\n return self.training_step(batch)\n\n def one_step_descent(self, batch=None):\n # load data\n if batch is None:\n self.cur_batch = self.get_batch()\n batch = self.cur_batch\n\n # calculate loss\n loss, loss_dict = self.get_loss(batch)\n\n # calculate gradient (a.k.a backward)\n self.backward(\n loss=loss,\n params=self.trainable_parameters(),\n paths=self._paths,\n create_graph=not self._first_order,\n retain_graph=self._retain_graph,\n allow_unused=self._allow_unused,\n )\n if self.is_implemented(\"grad_callback\"):\n self.grad_callback()\n\n # calculate parameter update\n if self._count % self.gas == 0:\n self.optimizer_step()\n\n # param callback (e.g., parameter clipping)\n if self.is_implemented(\"param_callback\"):\n self.param_callback()\n\n if 
self._strategy != \"default\" and self._count % (self.gas * 20) == 0:\n self.synchronize_params(self.trainable_parameters())\n\n # zero-out grad\n self.zero_grad()\n\n return loss_dict\n\n def step_normal(self, global_step=None):\n if self.check_ready():\n # loop start\n if self._inner_loop_start:\n if self.is_implemented(\"on_inner_loop_start\"):\n self.on_inner_loop_start()\n self._inner_loop_start = False\n\n # copy current parameters, buffers, optimizer states\n if self._roll_back:\n self.cache_states()\n\n # increase count (local step)\n if self._training:\n self._count += 1\n\n # one step grdient descent\n loss_dict = self.one_step_descent()\n\n # lr scheduler step\n if self.scheduler is not None and not self._roll_back:\n self.scheduler.step()\n\n # logging\n if (\n self.log_step > 0\n and self._count % self.log_step == 0\n and self.is_rank_zero()\n ):\n self.log(loss_dict, global_step)\n\n # call parent step_normal after unrolling\n if (\n self._training\n and self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_normal(global_step=global_step)\n\n self._inner_loop_start = True\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step_after_roll_back(self):\n if self.check_ready() and self._training:\n if self._roll_back:\n # recover from cached states\n self.recover_states()\n\n # one step gradient step\n _ = self.one_step_descent(batch=self.cur_batch)\n\n # lr scheduler\n if self.scheduler is not None:\n self.scheduler.step()\n\n # call parent step_after_roll_back\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_after_roll_back()\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step(self, global_step=None):\n \"\"\"\n ``step`` method abstracts a one-step gradient descent update with four sub-steps:\n 1) data loading, 2) cost calculation, 3) gradient calculation, and 4) parameter update.\n It also calls upper-level problems' step methods after unrolling gradient steps based on\n the hierarchical dependency graph.\n\n :param global_step: global step of the whole multilevel optimization. 
Defaults to None.\n :type global_step: int, optional\n \"\"\"\n self._global_step = global_step\n self.step_normal(global_step=global_step)\n if (\n self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n self.step_after_roll_back()\n\n def get_batch(self):\n \"\"\"\n Load training batch from the user-provided data loader\n\n :return: New training batch\n :rtype: Any\n \"\"\"\n batch = tuple(\n self.get_batch_single_loader(i) for i in range(len(self.train_data_loader))\n )\n\n return batch[0] if len(batch) == 1 else batch\n\n def get_batch_single_loader(self, idx):\n \"\"\"\n Load training batch from one of the user-provided data loader(s)\n\n :return: New training batch\n :rtype: Any\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/6", "ground_truth": " data_iterator = self.train_data_iterator[idx]\n try:\n batch = next(data_iterator)\n except StopIteration:\n if idx == 0:\n self.epoch_callback_exec()\n self.epoch_counter[idx] += 1\n train_data_loader = self.train_data_loader[idx]\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n train_data_loader.set_epoch(self.epoch_counter[idx])\n self.train_data_iterator[idx] = iter(train_data_loader)\n batch = next(self.train_data_iterator[idx])\n if not isinstance(batch, dict):\n batch = tuple(\n convert_tensor(value, self.device, self._is_default_fp16())\n for value in batch\n )\n else:\n for key, value in batch.items():\n batch[key] = convert_tensor(value, self.device, self._is_default_fp16())\n\n return batch\n", "fpath_tuple": ["leopard-ai_betty", "betty", "problems", "problem.py"], "context_start_lineno": 23, "lineno": 460, "function_name": "get_batch_single_loader"}, "groundtruth": " data_iterator = self.train_data_iterator[idx]\n try:\n batch = next(data_iterator)\n except StopIteration:\n if idx == 0:\n self.epoch_callback_exec()\n self.epoch_counter[idx] += 1\n train_data_loader = self.train_data_loader[idx]\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n train_data_loader.set_epoch(self.epoch_counter[idx])\n self.train_data_iterator[idx] = iter(train_data_loader)\n batch = next(self.train_data_iterator[idx])\n if not isinstance(batch, dict):\n batch = tuple(\n convert_tensor(value, self.device, self._is_default_fp16())\n for value in batch\n )\n else:\n for key, value in batch.items():\n batch[key] = convert_tensor(value, self.device, self._is_default_fp16())\n\n return batch\n"} +{"prompt": " # environment\n self.env = None\n\n # fp16 scaler\n self._fp16 = config.fp16\n self.scaler = None\n if self._fp16:\n self.initial_dynamic_scale = config.initial_dynamic_scale\n self.scale_factor = config.scale_factor\n\n # gradient accumulation\n self.gas = config.gradient_accumulation\n\n # gradient clipping\n self.gradient_clipping = config.gradient_clipping\n\n # warmup\n self.warmup_steps = config.warmup_steps\n\n # logger\n self.logger = None\n self.log_step = config.log_step\n self.log_local_step = config.log_local_step\n\n # step counter\n self._count = 0\n self._global_step = 0\n\n # misc\n self._leaf = False\n self._first_order = False\n self._retain_graph = config.retain_graph\n self._allow_unused = config.allow_unused\n self._unroll_steps = config.unroll_steps\n self._roll_back = False\n self._inner_loop_start = True\n self._training = True\n self.ready = None\n\n def initialize(self):\n \"\"\"\n ``initialize`` patches/sets up module, optimizer, data loader, etc. 
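The "leopard-ai--betty/6" ground truth that closes above restarts an exhausted data iterator: on `StopIteration` it fires the epoch callback, bumps the per-loader epoch counter, calls `set_epoch` on distributed samplers, and rebuilds the iterator before retrying. A stripped-down sketch of just the restart logic follows; the module-level `loader`/`next_batch` names are illustrative, and device placement plus the distributed `set_epoch` branch are intentionally omitted.

    loader = [10, 20, 30]        # stands in for a torch DataLoader
    iterator = iter(loader)
    epoch = 0

    def next_batch():
        global iterator, epoch
        try:
            return next(iterator)
        except StopIteration:
            epoch += 1                # epoch boundary reached
            iterator = iter(loader)   # rebuild the iterator for the new epoch
            return next(iterator)     # retry on the fresh iterator

    batches = [next_batch() for _ in range(4)]
    assert batches == [10, 20, 30, 10] and epoch == 1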
after compiling a\n user-provided configuration (e.g., fp16 training, iterative differentiation)\n \"\"\"\n # initialize update ready to False\n self.ready = [False for _ in range(len(self._children))]\n\n # compile parents configurations\n first_order = []\n for problem in self._parents:\n parent_config = problem.config\n first_order.append(parent_config.first_order)\n self._first_order = all(first_order)\n\n # set inner_loop_start to True\n self._inner_loop_start = True\n\n # accelerate\n if self._strategy == \"accelerate\":\n from accelerate import Accelerator\n\n self.accelerator = Accelerator()\n\n # set up data loader\n if self.is_implemented(\"configure_train_data_loader\"):\n if self.train_data_loader is None:\n self.train_data_loader = self.configure_train_data_loader()\n if self.train_data_loader is not None:\n if not isinstance(self.train_data_loader, tuple):\n self.train_data_loader = (self.train_data_loader,)\n else:\n assert self.is_implemented(\"get_batch\")\n\n # set up module\n if self.is_implemented(\"configure_module\"):\n if self.module is None:\n self.module = self.configure_module()\n assert self.module is not None, \"Module must be specified!\"\n\n # set up optimizer\n if self.is_implemented(\"configure_optimizer\"):\n if self.optimizer is None:\n self.optimizer = self.configure_optimizer()\n\n # set up lr scheduler\n if self.is_implemented(\"configure_scheduler\"):\n if self.scheduler is None:\n self.scheduler = self.configure_scheduler()\n\n # set up fp16 training\n if self._is_default_fp16():\n assert torch.cuda.is_available()\n scaler_cls = torch.cuda.amp.GradScaler\n if self._strategy == \"fsdp\":\n from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler\n\n scaler_cls = ShardedGradScaler\n self.scaler = scaler_cls(\n init_scale=self.initial_dynamic_scale, growth_factor=self.scale_factor\n )\n\n # patch module, optimizer, data loader, and scheduler\n self.patch_everything()\n\n # make train_data_loader as iterator\n if self.train_data_loader is not None:\n self.train_data_iterator = []\n self.epoch_counter = []\n for train_data_loader in self.train_data_loader:\n self.train_data_iterator.append(iter(train_data_loader))\n self.epoch_counter.append(0)\n\n # Logging INFO\n path_str = [[node.name for node in path] for path in self._paths]\n children_str = [node.name for node in self._children]\n parents_str = [node.name for node in self._parents]\n if self.is_rank_zero():\n self.logger.info(\"*** Problem Information ***\")\n self.logger.info(f\"Name: {self._name}\")\n self.logger.info(f\"Uppers: {parents_str}\")\n self.logger.info(f\"Lowers: {children_str}\")\n self.logger.info(f\"Paths: {path_str}\\n\")\n\n def patch_everything(self):\n \"\"\"\n We patch module, optimizer, data loader, and lr scheduler for device placement,\n distributed training, zero optimizer, fsdp, etc.\n \"\"\"\n self.patch_module()\n self.patch_optimizer()\n if self.scheduler is not None:\n self.patch_scheduler()\n if self.train_data_loader is not None:\n self.train_data_loader = [\n self.patch_data_loader(data_loader)\n for data_loader in self.train_data_loader\n ]\n\n def patch_module(self):\n \"\"\"\n Patch module given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.module.to(self.device)\n if self._strategy in [\"distributed\", \"zero\"]:\n self.synchronize_params(self.parameters())\n self.module = torch.nn.parallel.DistributedDataParallel(\n module=self.module,\n gradient_as_bucket_view=True,\n )\n elif self._strategy == \"fsdp\":\n if self.is_rank_zero():\n 
self.logger.warning(\"FSDP requires PyTorch version >= 1.12\")\n from torch.distributed.fsdp import FullyShardedDataParallel as FSDP\n\n self.synchronize_params(self.parameters())\n self.module = FSDP(self.module, device_id=self.device)\n elif self._strategy == \"accelerate\":\n self.module = self.accelerator.prepare(self.module)\n\n def patch_optimizer(self):\n \"\"\"\n Patch optimizer given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n params = self.trainable_parameters()\n if self.is_implemented(\"param_groups\") and self._strategy != \"fsdp\":\n params = self.param_groups()\n is_zero = True if self._strategy == \"zero\" else False\n if self._strategy == \"accelerate\":\n self.optimizer = self.accelerator.prepare(self.optimizer)\n else:\n self.optimizer = patch_optimizer(self.optimizer, params, is_zero)\n\n def patch_scheduler(self):\n \"\"\"\n Patch scheduler given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.scheduler = patch_scheduler(self.scheduler, self.optimizer)\n if self._strategy == \"accelerate\":\n self.scheduler = self.accelerator.prepare(self.scheduler)\n\n def patch_data_loader(self, loader):\n \"\"\"\n Patch data loader given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n patched_loader = get_distributed_data_loader(\n loader, world_size=self._world_size, rank=self._rank\n )\n elif self._strategy == \"accelerate\":\n patched_loader = self.accelerator.prepare(loader)\n else:\n patched_loader = loader\n\n return patched_loader\n\n def set_module(self, module):\n \"\"\"\n Set new module for the current Problem class.\n \"\"\"\n self.module = module\n self.patch_module()\n\n def set_optimizer(self, optimizer):\n \"\"\"\n Set new optimizer for the current Problem class.\n \"\"\"\n self.optimizer = optimizer\n self.patch_optimizer()\n\n def set_scheduler(self, scheduler):\n \"\"\"\n Set new scheduler for the current Problem class.\n \"\"\"\n self.scheduler = scheduler\n self.patch_scheduler()\n\n def set_train_data_loader(self, loader, idx=0):\n \"\"\"\n Set new data loader for the current Problem class.\n \"\"\"\n self.train_data_loader[idx] = self.patch_data_loader(loader)\n\n def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n\n def forward(self, *args, **kwargs):\n \"\"\"\n Users define how forward (or call) function is defined for the problem here.\n \"\"\"\n return self.module(*args, **kwargs)\n\n @abc.abstractmethod\n def training_step(self, batch):\n \"\"\"\n Users define the loss function of the problem here.\n \"\"\"\n raise NotImplementedError\n\n def training_step_exec(self, batch):\n if self._is_default_fp16():\n with torch.cuda.amp.autocast():\n return self.training_step(batch)\n else:\n return self.training_step(batch)\n\n def one_step_descent(self, batch=None):\n # load data\n if batch is None:\n self.cur_batch = self.get_batch()\n batch = self.cur_batch\n\n # calculate loss\n loss, loss_dict = self.get_loss(batch)\n\n # calculate gradient (a.k.a backward)\n self.backward(\n loss=loss,\n params=self.trainable_parameters(),\n paths=self._paths,\n create_graph=not self._first_order,\n retain_graph=self._retain_graph,\n allow_unused=self._allow_unused,\n )\n if self.is_implemented(\"grad_callback\"):\n self.grad_callback()\n\n # calculate parameter update\n if self._count % self.gas == 0:\n self.optimizer_step()\n\n # param callback (e.g., parameter clipping)\n if self.is_implemented(\"param_callback\"):\n self.param_callback()\n\n if 
self._strategy != \"default\" and self._count % (self.gas * 20) == 0:\n self.synchronize_params(self.trainable_parameters())\n\n # zero-out grad\n self.zero_grad()\n\n return loss_dict\n\n def step_normal(self, global_step=None):\n if self.check_ready():\n # loop start\n if self._inner_loop_start:\n if self.is_implemented(\"on_inner_loop_start\"):\n self.on_inner_loop_start()\n self._inner_loop_start = False\n\n # copy current parameters, buffers, optimizer states\n if self._roll_back:\n self.cache_states()\n\n # increase count (local step)\n if self._training:\n self._count += 1\n\n # one step grdient descent\n loss_dict = self.one_step_descent()\n\n # lr scheduler step\n if self.scheduler is not None and not self._roll_back:\n self.scheduler.step()\n\n # logging\n if (\n self.log_step > 0\n and self._count % self.log_step == 0\n and self.is_rank_zero()\n ):\n self.log(loss_dict, global_step)\n\n # call parent step_normal after unrolling\n if (\n self._training\n and self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_normal(global_step=global_step)\n\n self._inner_loop_start = True\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step_after_roll_back(self):\n if self.check_ready() and self._training:\n if self._roll_back:\n # recover from cached states\n self.recover_states()\n\n # one step gradient step\n _ = self.one_step_descent(batch=self.cur_batch)\n\n # lr scheduler\n if self.scheduler is not None:\n self.scheduler.step()\n\n # call parent step_after_roll_back\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_after_roll_back()\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step(self, global_step=None):\n \"\"\"\n ``step`` method abstracts a one-step gradient descent update with four sub-steps:\n 1) data loading, 2) cost calculation, 3) gradient calculation, and 4) parameter update.\n It also calls upper-level problems' step methods after unrolling gradient steps based on\n the hierarchical dependency graph.\n\n :param global_step: global step of the whole multilevel optimization. 
Defaults to None.\n :type global_step: int, optional\n \"\"\"\n self._global_step = global_step\n self.step_normal(global_step=global_step)\n if (\n self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n self.step_after_roll_back()\n\n def get_batch(self):\n \"\"\"\n Load training batch from the user-provided data loader\n\n :return: New training batch\n :rtype: Any\n \"\"\"\n batch = tuple(\n self.get_batch_single_loader(i) for i in range(len(self.train_data_loader))\n )\n\n return batch[0] if len(batch) == 1 else batch\n\n def get_batch_single_loader(self, idx):\n \"\"\"\n Load training batch from one of the user-provided data loader(s)\n\n :return: New training batch\n :rtype: Any\n \"\"\"\n data_iterator = self.train_data_iterator[idx]\n try:\n batch = next(data_iterator)\n except StopIteration:\n if idx == 0:\n self.epoch_callback_exec()\n self.epoch_counter[idx] += 1\n train_data_loader = self.train_data_loader[idx]\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n train_data_loader.set_epoch(self.epoch_counter[idx])\n self.train_data_iterator[idx] = iter(train_data_loader)\n batch = next(self.train_data_iterator[idx])\n if not isinstance(batch, dict):\n batch = tuple(\n convert_tensor(value, self.device, self._is_default_fp16())\n for value in batch\n )\n else:\n for key, value in batch.items():\n batch[key] = convert_tensor(value, self.device, self._is_default_fp16())\n\n return batch\n\n def get_loss(self, batch):\n \"\"\"\n Calculate loss and log metrics for the current batch based on the user-defined loss\n function.\n\n :return: loss and log metrics (e.g. classification accuracy)\n :rtype: dict\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/7", "ground_truth": " maybe_loss_dict = self.training_step_exec(batch)\n is_dict = isinstance(maybe_loss_dict, dict)\n loss = maybe_loss_dict[\"loss\"] if is_dict else maybe_loss_dict\n loss_no_scale = loss.item()\n if self._is_default_fp16():\n loss = self.scaler.scale(loss)\n loss = loss / self.gas\n\n # construct loss dict\n loss_dict = {\"loss\": loss_no_scale}\n if is_dict:\n for key, value in maybe_loss_dict.items():\n if key != \"loss\":\n loss_dict[key] = value\n\n return loss, loss_dict\n", "fpath_tuple": ["leopard-ai_betty", "betty", "problems", "problem.py"], "context_start_lineno": 73, "lineno": 491, "function_name": "get_loss"}, "groundtruth": " maybe_loss_dict = self.training_step_exec(batch)\n is_dict = isinstance(maybe_loss_dict, dict)\n loss = maybe_loss_dict[\"loss\"] if is_dict else maybe_loss_dict\n loss_no_scale = loss.item()\n if self._is_default_fp16():\n loss = self.scaler.scale(loss)\n loss = loss / self.gas\n\n # construct loss dict\n loss_dict = {\"loss\": loss_no_scale}\n if is_dict:\n for key, value in maybe_loss_dict.items():\n if key != \"loss\":\n loss_dict[key] = value\n\n return loss, loss_dict\n"} +{"prompt": "s = ShardedGradScaler\n self.scaler = scaler_cls(\n init_scale=self.initial_dynamic_scale, growth_factor=self.scale_factor\n )\n\n # patch module, optimizer, data loader, and scheduler\n self.patch_everything()\n\n # make train_data_loader as iterator\n if self.train_data_loader is not None:\n self.train_data_iterator = []\n self.epoch_counter = []\n for train_data_loader in self.train_data_loader:\n self.train_data_iterator.append(iter(train_data_loader))\n self.epoch_counter.append(0)\n\n # Logging INFO\n path_str = [[node.name for node in path] for path in self._paths]\n children_str = [node.name for node in self._children]\n parents_str 
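The "leopard-ai--betty/7" ground truth above accepts either a bare loss or a dict carrying a "loss" key plus extra metrics, logs the unscaled value, and divides the returned loss by the gradient-accumulation factor. Here is a torch-free sketch of that bookkeeping with plain floats standing in for tensors (so `loss.item()` becomes the value itself); the fp16 `GradScaler` branch is left out.

    def get_loss(maybe_loss_dict, gas):
        is_dict = isinstance(maybe_loss_dict, dict)
        loss = maybe_loss_dict["loss"] if is_dict else maybe_loss_dict
        loss_no_scale = loss            # stands in for loss.item()
        loss = loss / gas               # spread over `gas` accumulation steps
        loss_dict = {"loss": loss_no_scale}
        if is_dict:                     # carry extra metrics through unchanged
            for key, value in maybe_loss_dict.items():
                if key != "loss":
                    loss_dict[key] = value
        return loss, loss_dict

    assert get_loss(2.0, gas=4) == (0.5, {"loss": 2.0})
    assert get_loss({"loss": 2.0, "acc": 0.9}, gas=2) == (1.0, {"loss": 2.0, "acc": 0.9})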
= [node.name for node in self._parents]\n if self.is_rank_zero():\n self.logger.info(\"*** Problem Information ***\")\n self.logger.info(f\"Name: {self._name}\")\n self.logger.info(f\"Uppers: {parents_str}\")\n self.logger.info(f\"Lowers: {children_str}\")\n self.logger.info(f\"Paths: {path_str}\\n\")\n\n def patch_everything(self):\n \"\"\"\n We patch module, optimizer, data loader, and lr scheduler for device placement,\n distributed training, zero optimizer, fsdp, etc.\n \"\"\"\n self.patch_module()\n self.patch_optimizer()\n if self.scheduler is not None:\n self.patch_scheduler()\n if self.train_data_loader is not None:\n self.train_data_loader = [\n self.patch_data_loader(data_loader)\n for data_loader in self.train_data_loader\n ]\n\n def patch_module(self):\n \"\"\"\n Patch module given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.module.to(self.device)\n if self._strategy in [\"distributed\", \"zero\"]:\n self.synchronize_params(self.parameters())\n self.module = torch.nn.parallel.DistributedDataParallel(\n module=self.module,\n gradient_as_bucket_view=True,\n )\n elif self._strategy == \"fsdp\":\n if self.is_rank_zero():\n self.logger.warning(\"FSDP requires PyTorch version >= 1.12\")\n from torch.distributed.fsdp import FullyShardedDataParallel as FSDP\n\n self.synchronize_params(self.parameters())\n self.module = FSDP(self.module, device_id=self.device)\n elif self._strategy == \"accelerate\":\n self.module = self.accelerator.prepare(self.module)\n\n def patch_optimizer(self):\n \"\"\"\n Patch optimizer given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n params = self.trainable_parameters()\n if self.is_implemented(\"param_groups\") and self._strategy != \"fsdp\":\n params = self.param_groups()\n is_zero = True if self._strategy == \"zero\" else False\n if self._strategy == \"accelerate\":\n self.optimizer = self.accelerator.prepare(self.optimizer)\n else:\n self.optimizer = patch_optimizer(self.optimizer, params, is_zero)\n\n def patch_scheduler(self):\n \"\"\"\n Patch scheduler given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.scheduler = patch_scheduler(self.scheduler, self.optimizer)\n if self._strategy == \"accelerate\":\n self.scheduler = self.accelerator.prepare(self.scheduler)\n\n def patch_data_loader(self, loader):\n \"\"\"\n Patch data loader given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n patched_loader = get_distributed_data_loader(\n loader, world_size=self._world_size, rank=self._rank\n )\n elif self._strategy == \"accelerate\":\n patched_loader = self.accelerator.prepare(loader)\n else:\n patched_loader = loader\n\n return patched_loader\n\n def set_module(self, module):\n \"\"\"\n Set new module for the current Problem class.\n \"\"\"\n self.module = module\n self.patch_module()\n\n def set_optimizer(self, optimizer):\n \"\"\"\n Set new optimizer for the current Problem class.\n \"\"\"\n self.optimizer = optimizer\n self.patch_optimizer()\n\n def set_scheduler(self, scheduler):\n \"\"\"\n Set new scheduler for the current Problem class.\n \"\"\"\n self.scheduler = scheduler\n self.patch_scheduler()\n\n def set_train_data_loader(self, loader, idx=0):\n \"\"\"\n Set new data loader for the current Problem class.\n \"\"\"\n self.train_data_loader[idx] = self.patch_data_loader(loader)\n\n def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n\n def forward(self, *args, **kwargs):\n \"\"\"\n Users define how forward (or call) 
function is defined for the problem here.\n \"\"\"\n return self.module(*args, **kwargs)\n\n @abc.abstractmethod\n def training_step(self, batch):\n \"\"\"\n Users define the loss function of the problem here.\n \"\"\"\n raise NotImplementedError\n\n def training_step_exec(self, batch):\n if self._is_default_fp16():\n with torch.cuda.amp.autocast():\n return self.training_step(batch)\n else:\n return self.training_step(batch)\n\n def one_step_descent(self, batch=None):\n # load data\n if batch is None:\n self.cur_batch = self.get_batch()\n batch = self.cur_batch\n\n # calculate loss\n loss, loss_dict = self.get_loss(batch)\n\n # calculate gradient (a.k.a backward)\n self.backward(\n loss=loss,\n params=self.trainable_parameters(),\n paths=self._paths,\n create_graph=not self._first_order,\n retain_graph=self._retain_graph,\n allow_unused=self._allow_unused,\n )\n if self.is_implemented(\"grad_callback\"):\n self.grad_callback()\n\n # calculate parameter update\n if self._count % self.gas == 0:\n self.optimizer_step()\n\n # param callback (e.g., parameter clipping)\n if self.is_implemented(\"param_callback\"):\n self.param_callback()\n\n if self._strategy != \"default\" and self._count % (self.gas * 20) == 0:\n self.synchronize_params(self.trainable_parameters())\n\n # zero-out grad\n self.zero_grad()\n\n return loss_dict\n\n def step_normal(self, global_step=None):\n if self.check_ready():\n # loop start\n if self._inner_loop_start:\n if self.is_implemented(\"on_inner_loop_start\"):\n self.on_inner_loop_start()\n self._inner_loop_start = False\n\n # copy current parameters, buffers, optimizer states\n if self._roll_back:\n self.cache_states()\n\n # increase count (local step)\n if self._training:\n self._count += 1\n\n # one step grdient descent\n loss_dict = self.one_step_descent()\n\n # lr scheduler step\n if self.scheduler is not None and not self._roll_back:\n self.scheduler.step()\n\n # logging\n if (\n self.log_step > 0\n and self._count % self.log_step == 0\n and self.is_rank_zero()\n ):\n self.log(loss_dict, global_step)\n\n # call parent step_normal after unrolling\n if (\n self._training\n and self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_normal(global_step=global_step)\n\n self._inner_loop_start = True\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step_after_roll_back(self):\n if self.check_ready() and self._training:\n if self._roll_back:\n # recover from cached states\n self.recover_states()\n\n # one step gradient step\n _ = self.one_step_descent(batch=self.cur_batch)\n\n # lr scheduler\n if self.scheduler is not None:\n self.scheduler.step()\n\n # call parent step_after_roll_back\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_after_roll_back()\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step(self, global_step=None):\n \"\"\"\n ``step`` method abstracts a one-step gradient descent update with four sub-steps:\n 1) data loading, 2) cost calculation, 3) gradient calculation, and 4) parameter update.\n It also calls upper-level problems' step methods after unrolling gradient steps based on\n the hierarchical dependency graph.\n\n :param global_step: global step of the whole multilevel optimization. 
Defaults to None.\n :type global_step: int, optional\n \"\"\"\n self._global_step = global_step\n self.step_normal(global_step=global_step)\n if (\n self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n self.step_after_roll_back()\n\n def get_batch(self):\n \"\"\"\n Load training batch from the user-provided data loader\n\n :return: New training batch\n :rtype: Any\n \"\"\"\n batch = tuple(\n self.get_batch_single_loader(i) for i in range(len(self.train_data_loader))\n )\n\n return batch[0] if len(batch) == 1 else batch\n\n def get_batch_single_loader(self, idx):\n \"\"\"\n Load training batch from one of the user-provided data loader(s)\n\n :return: New training batch\n :rtype: Any\n \"\"\"\n data_iterator = self.train_data_iterator[idx]\n try:\n batch = next(data_iterator)\n except StopIteration:\n if idx == 0:\n self.epoch_callback_exec()\n self.epoch_counter[idx] += 1\n train_data_loader = self.train_data_loader[idx]\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n train_data_loader.set_epoch(self.epoch_counter[idx])\n self.train_data_iterator[idx] = iter(train_data_loader)\n batch = next(self.train_data_iterator[idx])\n if not isinstance(batch, dict):\n batch = tuple(\n convert_tensor(value, self.device, self._is_default_fp16())\n for value in batch\n )\n else:\n for key, value in batch.items():\n batch[key] = convert_tensor(value, self.device, self._is_default_fp16())\n\n return batch\n\n def get_loss(self, batch):\n \"\"\"\n Calculate loss and log metrics for the current batch based on the user-defined loss\n function.\n\n :return: loss and log metrics (e.g. classification accuracy)\n :rtype: dict\n \"\"\"\n maybe_loss_dict = self.training_step_exec(batch)\n is_dict = isinstance(maybe_loss_dict, dict)\n loss = maybe_loss_dict[\"loss\"] if is_dict else maybe_loss_dict\n loss_no_scale = loss.item()\n if self._is_default_fp16():\n loss = self.scaler.scale(loss)\n loss = loss / self.gas\n\n # construct loss dict\n loss_dict = {\"loss\": loss_no_scale}\n if is_dict:\n for key, value in maybe_loss_dict.items():\n if key != \"loss\":\n loss_dict[key] = value\n\n return loss, loss_dict\n\n def backward(\n self,\n loss,\n params,\n paths,\n create_graph=False,\n retain_graph=True,\n allow_unused=True,\n ):\n \"\"\"\n Calculate the gradient of ``loss`` with respect to ``params`` based on a user-defined\n ``config``.\n\n :param loss: Outputs of the differentiated function.\n :type loss: Tensor\n :param params: Inputs with respect to which the gradient will be returned.\n :type params: Sequence of Tensor\n :param paths: Paths on which the gradient will be calculated.\n :type paths: List of list of Problem\n :param create_graph:\n If ``True``, graph of the derivative will be constructed, allowing to compute higher order\n derivative products. Default: ``True``.\n :type create_graph: bool, optional\n :param retain_graph:\n If ``False``, the graph used to compute the grad will be freed. Note that in nearly all\n cases setting this option to ``True`` is not needed and often can be worked around in a much\n more efficient way. Defaults to the value of ``create_graph``.\n :type retain_graph: bool, optional\n :param allow_unused:\n If ``False``, specifying inputs that were not used when computing outputs (and therefore\n their grad is always zero) is an error. 
Defaults to ``False``.\n :type allow_unused: bool, optional\n \"\"\"\n # direct grad\n if len(paths) > 0 or not self.gradient_accumulation_boundary():\n grads = torch.autograd.grad(\n loss,\n params,\n create_graph=create_graph,\n retain_graph=retain_graph,\n allow_unused=allow_unused,\n )\n self.set_grads(params, grads)\n else:\n torch.autograd.backward(\n loss,\n inputs=params,\n create_graph=create_graph,\n retain_graph=retain_graph,\n )\n\n # indirect grad: best-response Jacobian\n if self._config.first_order:\n for idx, path in enumerate(paths):\n retain_graph_implicit = False if idx == len(paths) - 1 else True\n do_sync = bool(\n idx == len(paths) - 1 and self.gradient_accumulation_boundary()\n )\n grads = get_grads(loss, path, retain_graph_implicit, do_sync)\n if not do_sync:\n self.set_grads(params, grads)\n\n def set_grads(self, params, grads):\n \"\"\"\n Set gradients for trainable parameters. ``params.grad = grads``\n\n :param params: Trainable parameters\n :type params: Sequence of Tensor\n :param grads: Calculated gradient\n :type grads: Sequence of Tensor\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/8", "ground_truth": " for param, grad in zip(params, grads):\n if grad is not None:\n if hasattr(param, \"grad\") and param.grad is not None:\n param.grad = param.grad + grad\n else:\n param.grad = grad\n", "fpath_tuple": ["leopard-ai_betty", "betty", "problems", "problem.py"], "context_start_lineno": 169, "lineno": 579, "function_name": "set_grads"}, "groundtruth": " for param, grad in zip(params, grads):\n if grad is not None:\n if hasattr(param, \"grad\") and param.grad is not None:\n param.grad = param.grad + grad\n else:\n param.grad = grad\n"} +{"prompt": "\")\n self.logger.info(f\"Name: {self._name}\")\n self.logger.info(f\"Uppers: {parents_str}\")\n self.logger.info(f\"Lowers: {children_str}\")\n self.logger.info(f\"Paths: {path_str}\\n\")\n\n def patch_everything(self):\n \"\"\"\n We patch module, optimizer, data loader, and lr scheduler for device placement,\n distributed training, zero optimizer, fsdp, etc.\n \"\"\"\n self.patch_module()\n self.patch_optimizer()\n if self.scheduler is not None:\n self.patch_scheduler()\n if self.train_data_loader is not None:\n self.train_data_loader = [\n self.patch_data_loader(data_loader)\n for data_loader in self.train_data_loader\n ]\n\n def patch_module(self):\n \"\"\"\n Patch module given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.module.to(self.device)\n if self._strategy in [\"distributed\", \"zero\"]:\n self.synchronize_params(self.parameters())\n self.module = torch.nn.parallel.DistributedDataParallel(\n module=self.module,\n gradient_as_bucket_view=True,\n )\n elif self._strategy == \"fsdp\":\n if self.is_rank_zero():\n self.logger.warning(\"FSDP requires PyTorch version >= 1.12\")\n from torch.distributed.fsdp import FullyShardedDataParallel as FSDP\n\n self.synchronize_params(self.parameters())\n self.module = FSDP(self.module, device_id=self.device)\n elif self._strategy == \"accelerate\":\n self.module = self.accelerator.prepare(self.module)\n\n def patch_optimizer(self):\n \"\"\"\n Patch optimizer given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n params = self.trainable_parameters()\n if self.is_implemented(\"param_groups\") and self._strategy != \"fsdp\":\n params = self.param_groups()\n is_zero = True if self._strategy == \"zero\" else False\n if self._strategy == \"accelerate\":\n self.optimizer = self.accelerator.prepare(self.optimizer)\n else:\n self.optimizer = 
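The "leopard-ai--betty/8" ground truth above accumulates freshly computed gradients onto parameters: the first write initializes `param.grad`, later writes add to it, and `None` grads (unused parameters under `allow_unused=True`) are skipped. A minimal sketch with a hypothetical `P` object standing in for a torch Parameter:

    class P:
        # Hypothetical stand-in for a torch Parameter.
        def __init__(self):
            self.grad = None

    def set_grads(params, grads):
        for param, grad in zip(params, grads):
            if grad is not None:
                if getattr(param, "grad", None) is not None:
                    param.grad = param.grad + grad   # accumulate
                else:
                    param.grad = grad                # first write

    p = P()
    set_grads([p], [1.0])
    set_grads([p], [2.0])
    assert p.grad == 3.0
    set_grads([p], [None])     # unused param: grad left untouched
    assert p.grad == 3.0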
patch_optimizer(self.optimizer, params, is_zero)\n\n def patch_scheduler(self):\n \"\"\"\n Patch scheduler given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.scheduler = patch_scheduler(self.scheduler, self.optimizer)\n if self._strategy == \"accelerate\":\n self.scheduler = self.accelerator.prepare(self.scheduler)\n\n def patch_data_loader(self, loader):\n \"\"\"\n Patch data loader given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n patched_loader = get_distributed_data_loader(\n loader, world_size=self._world_size, rank=self._rank\n )\n elif self._strategy == \"accelerate\":\n patched_loader = self.accelerator.prepare(loader)\n else:\n patched_loader = loader\n\n return patched_loader\n\n def set_module(self, module):\n \"\"\"\n Set new module for the current Problem class.\n \"\"\"\n self.module = module\n self.patch_module()\n\n def set_optimizer(self, optimizer):\n \"\"\"\n Set new optimizer for the current Problem class.\n \"\"\"\n self.optimizer = optimizer\n self.patch_optimizer()\n\n def set_scheduler(self, scheduler):\n \"\"\"\n Set new scheduler for the current Problem class.\n \"\"\"\n self.scheduler = scheduler\n self.patch_scheduler()\n\n def set_train_data_loader(self, loader, idx=0):\n \"\"\"\n Set new data loader for the current Problem class.\n \"\"\"\n self.train_data_loader[idx] = self.patch_data_loader(loader)\n\n def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n\n def forward(self, *args, **kwargs):\n \"\"\"\n Users define how forward (or call) function is defined for the problem here.\n \"\"\"\n return self.module(*args, **kwargs)\n\n @abc.abstractmethod\n def training_step(self, batch):\n \"\"\"\n Users define the loss function of the problem here.\n \"\"\"\n raise NotImplementedError\n\n def training_step_exec(self, batch):\n if self._is_default_fp16():\n with torch.cuda.amp.autocast():\n return self.training_step(batch)\n else:\n return self.training_step(batch)\n\n def one_step_descent(self, batch=None):\n # load data\n if batch is None:\n self.cur_batch = self.get_batch()\n batch = self.cur_batch\n\n # calculate loss\n loss, loss_dict = self.get_loss(batch)\n\n # calculate gradient (a.k.a backward)\n self.backward(\n loss=loss,\n params=self.trainable_parameters(),\n paths=self._paths,\n create_graph=not self._first_order,\n retain_graph=self._retain_graph,\n allow_unused=self._allow_unused,\n )\n if self.is_implemented(\"grad_callback\"):\n self.grad_callback()\n\n # calculate parameter update\n if self._count % self.gas == 0:\n self.optimizer_step()\n\n # param callback (e.g., parameter clipping)\n if self.is_implemented(\"param_callback\"):\n self.param_callback()\n\n if self._strategy != \"default\" and self._count % (self.gas * 20) == 0:\n self.synchronize_params(self.trainable_parameters())\n\n # zero-out grad\n self.zero_grad()\n\n return loss_dict\n\n def step_normal(self, global_step=None):\n if self.check_ready():\n # loop start\n if self._inner_loop_start:\n if self.is_implemented(\"on_inner_loop_start\"):\n self.on_inner_loop_start()\n self._inner_loop_start = False\n\n # copy current parameters, buffers, optimizer states\n if self._roll_back:\n self.cache_states()\n\n # increase count (local step)\n if self._training:\n self._count += 1\n\n # one step grdient descent\n loss_dict = self.one_step_descent()\n\n # lr scheduler step\n if self.scheduler is not None and not self._roll_back:\n self.scheduler.step()\n\n # logging\n if (\n 
self.log_step > 0\n and self._count % self.log_step == 0\n and self.is_rank_zero()\n ):\n self.log(loss_dict, global_step)\n\n # call parent step_normal after unrolling\n if (\n self._training\n and self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_normal(global_step=global_step)\n\n self._inner_loop_start = True\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step_after_roll_back(self):\n if self.check_ready() and self._training:\n if self._roll_back:\n # recover from cached states\n self.recover_states()\n\n # one step gradient step\n _ = self.one_step_descent(batch=self.cur_batch)\n\n # lr scheduler\n if self.scheduler is not None:\n self.scheduler.step()\n\n # call parent step_after_roll_back\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_after_roll_back()\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step(self, global_step=None):\n \"\"\"\n ``step`` method abstracts a one-step gradient descent update with four sub-steps:\n 1) data loading, 2) cost calculation, 3) gradient calculation, and 4) parameter update.\n It also calls upper-level problems' step methods after unrolling gradient steps based on\n the hierarchical dependency graph.\n\n :param global_step: global step of the whole multilevel optimization. Defaults to None.\n :type global_step: int, optional\n \"\"\"\n self._global_step = global_step\n self.step_normal(global_step=global_step)\n if (\n self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n self.step_after_roll_back()\n\n def get_batch(self):\n \"\"\"\n Load training batch from the user-provided data loader\n\n :return: New training batch\n :rtype: Any\n \"\"\"\n batch = tuple(\n self.get_batch_single_loader(i) for i in range(len(self.train_data_loader))\n )\n\n return batch[0] if len(batch) == 1 else batch\n\n def get_batch_single_loader(self, idx):\n \"\"\"\n Load training batch from one of the user-provided data loader(s)\n\n :return: New training batch\n :rtype: Any\n \"\"\"\n data_iterator = self.train_data_iterator[idx]\n try:\n batch = next(data_iterator)\n except StopIteration:\n if idx == 0:\n self.epoch_callback_exec()\n self.epoch_counter[idx] += 1\n train_data_loader = self.train_data_loader[idx]\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n train_data_loader.set_epoch(self.epoch_counter[idx])\n self.train_data_iterator[idx] = iter(train_data_loader)\n batch = next(self.train_data_iterator[idx])\n if not isinstance(batch, dict):\n batch = tuple(\n convert_tensor(value, self.device, self._is_default_fp16())\n for value in batch\n )\n else:\n for key, value in batch.items():\n batch[key] = convert_tensor(value, self.device, self._is_default_fp16())\n\n return batch\n\n def get_loss(self, batch):\n \"\"\"\n Calculate loss and log metrics for the current batch based on the user-defined loss\n function.\n\n :return: loss and log metrics (e.g. 
classification accuracy)\n :rtype: dict\n \"\"\"\n maybe_loss_dict = self.training_step_exec(batch)\n is_dict = isinstance(maybe_loss_dict, dict)\n loss = maybe_loss_dict[\"loss\"] if is_dict else maybe_loss_dict\n loss_no_scale = loss.item()\n if self._is_default_fp16():\n loss = self.scaler.scale(loss)\n loss = loss / self.gas\n\n # construct loss dict\n loss_dict = {\"loss\": loss_no_scale}\n if is_dict:\n for key, value in maybe_loss_dict.items():\n if key != \"loss\":\n loss_dict[key] = value\n\n return loss, loss_dict\n\n def backward(\n self,\n loss,\n params,\n paths,\n create_graph=False,\n retain_graph=True,\n allow_unused=True,\n ):\n \"\"\"\n Calculate the gradient of ``loss`` with respect to ``params`` based on a user-defined\n ``config``.\n\n :param loss: Outputs of the differentiated function.\n :type loss: Tensor\n :param params: Inputs with respect to which the gradient will be returned.\n :type params: Sequence of Tensor\n :param paths: Paths on which the gradient will be calculated.\n :type paths: List of list of Problem\n :param create_graph:\n If ``True``, graph of the derivative will be constructed, allowing to compute higher order\n derivative products. Default: ``True``.\n :type create_graph: bool, optional\n :param retain_graph:\n If ``False``, the graph used to compute the grad will be freed. Note that in nearly all\n cases setting this option to ``True`` is not needed and often can be worked around in a much\n more efficient way. Defaults to the value of ``create_graph``.\n :type retain_graph: bool, optional\n :param allow_unused:\n If ``False``, specifying inputs that were not used when computing outputs (and therefore\n their grad is always zero) is an error. Defaults to ``False``.\n :type allow_unused: bool, optional\n \"\"\"\n # direct grad\n if len(paths) > 0 or not self.gradient_accumulation_boundary():\n grads = torch.autograd.grad(\n loss,\n params,\n create_graph=create_graph,\n retain_graph=retain_graph,\n allow_unused=allow_unused,\n )\n self.set_grads(params, grads)\n else:\n torch.autograd.backward(\n loss,\n inputs=params,\n create_graph=create_graph,\n retain_graph=retain_graph,\n )\n\n # indirect grad: best-response Jacobian\n if self._config.first_order:\n for idx, path in enumerate(paths):\n retain_graph_implicit = False if idx == len(paths) - 1 else True\n do_sync = bool(\n idx == len(paths) - 1 and self.gradient_accumulation_boundary()\n )\n grads = get_grads(loss, path, retain_graph_implicit, do_sync)\n if not do_sync:\n self.set_grads(params, grads)\n\n def set_grads(self, params, grads):\n \"\"\"\n Set gradients for trainable parameters. 
``params.grad = grads``\n\n :param params: Trainable parameters\n :type params: Sequence of Tensor\n :param grads: Calculated gradient\n :type grads: Sequence of Tensor\n \"\"\"\n for param, grad in zip(params, grads):\n if grad is not None:\n if hasattr(param, \"grad\") and param.grad is not None:\n param.grad = param.grad + grad\n else:\n param.grad = grad\n\n def synchronize_params(self, params):\n \"\"\"\n synchronize parameters across distributed data-parallel processes\n \"\"\"\n if self._world_size > 1 and self._strategy not in [\"fsdp\", \"accelerate\"]:\n for param in params:\n dist.broadcast(param.data, 0)\n\n @abc.abstractmethod\n def optimizer_step(self, *args, **kwargs):\n \"\"\"\n Update weights as in PyTorch's native ``optim.step()``\n \"\"\"\n raise NotImplementedError\n\n def zero_grad(self):\n \"\"\"\n Set gradients for trainable parameters for the current problem to 0.\n Similar with PyTorch's ``optim.zero_grad()`` or ``module.zero_grad()``.\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/9", "ground_truth": " for param in list(self.trainable_parameters()):\n if hasattr(param, \"grad\"):\n del param.grad\n", "fpath_tuple": ["leopard-ai_betty", "betty", "problems", "problem.py"], "context_start_lineno": 190, "lineno": 606, "function_name": "zero_grad"}, "groundtruth": " for param in list(self.trainable_parameters()):\n if hasattr(param, \"grad\"):\n del param.grad\n"} +{"prompt": " False\n if self._strategy == \"accelerate\":\n self.optimizer = self.accelerator.prepare(self.optimizer)\n else:\n self.optimizer = patch_optimizer(self.optimizer, params, is_zero)\n\n def patch_scheduler(self):\n \"\"\"\n Patch scheduler given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n self.scheduler = patch_scheduler(self.scheduler, self.optimizer)\n if self._strategy == \"accelerate\":\n self.scheduler = self.accelerator.prepare(self.scheduler)\n\n def patch_data_loader(self, loader):\n \"\"\"\n Patch data loader given the systems configuration (e.g., DDP, FSDP)\n \"\"\"\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n patched_loader = get_distributed_data_loader(\n loader, world_size=self._world_size, rank=self._rank\n )\n elif self._strategy == \"accelerate\":\n patched_loader = self.accelerator.prepare(loader)\n else:\n patched_loader = loader\n\n return patched_loader\n\n def set_module(self, module):\n \"\"\"\n Set new module for the current Problem class.\n \"\"\"\n self.module = module\n self.patch_module()\n\n def set_optimizer(self, optimizer):\n \"\"\"\n Set new optimizer for the current Problem class.\n \"\"\"\n self.optimizer = optimizer\n self.patch_optimizer()\n\n def set_scheduler(self, scheduler):\n \"\"\"\n Set new scheduler for the current Problem class.\n \"\"\"\n self.scheduler = scheduler\n self.patch_scheduler()\n\n def set_train_data_loader(self, loader, idx=0):\n \"\"\"\n Set new data loader for the current Problem class.\n \"\"\"\n self.train_data_loader[idx] = self.patch_data_loader(loader)\n\n def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n\n def forward(self, *args, **kwargs):\n \"\"\"\n Users define how forward (or call) function is defined for the problem here.\n \"\"\"\n return self.module(*args, **kwargs)\n\n @abc.abstractmethod\n def training_step(self, batch):\n \"\"\"\n Users define the loss function of the problem here.\n \"\"\"\n raise NotImplementedError\n\n def training_step_exec(self, batch):\n if self._is_default_fp16():\n with torch.cuda.amp.autocast():\n return 
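The "leopard-ai--betty/9" ground truth that closes above zeroes gradients by deleting the `.grad` attribute outright rather than filling it with zeros, which also releases any graph attached to grads created with `create_graph=True`. A sketch with a plain object in place of a torch Parameter (on real Parameters, `grad` is a property and deletion resets it to `None` rather than removing the attribute):

    class P:
        # Hypothetical stand-in for a torch Parameter.
        def __init__(self):
            self.grad = 1.0

    def zero_grad(params):
        for param in list(params):
            if hasattr(param, "grad"):
                del param.grad     # drop the stored gradient entirely

    p = P()
    zero_grad([p])
    assert not hasattr(p, "grad")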
self.training_step(batch)\n else:\n return self.training_step(batch)\n\n def one_step_descent(self, batch=None):\n # load data\n if batch is None:\n self.cur_batch = self.get_batch()\n batch = self.cur_batch\n\n # calculate loss\n loss, loss_dict = self.get_loss(batch)\n\n # calculate gradient (a.k.a backward)\n self.backward(\n loss=loss,\n params=self.trainable_parameters(),\n paths=self._paths,\n create_graph=not self._first_order,\n retain_graph=self._retain_graph,\n allow_unused=self._allow_unused,\n )\n if self.is_implemented(\"grad_callback\"):\n self.grad_callback()\n\n # calculate parameter update\n if self._count % self.gas == 0:\n self.optimizer_step()\n\n # param callback (e.g., parameter clipping)\n if self.is_implemented(\"param_callback\"):\n self.param_callback()\n\n if self._strategy != \"default\" and self._count % (self.gas * 20) == 0:\n self.synchronize_params(self.trainable_parameters())\n\n # zero-out grad\n self.zero_grad()\n\n return loss_dict\n\n def step_normal(self, global_step=None):\n if self.check_ready():\n # loop start\n if self._inner_loop_start:\n if self.is_implemented(\"on_inner_loop_start\"):\n self.on_inner_loop_start()\n self._inner_loop_start = False\n\n # copy current parameters, buffers, optimizer states\n if self._roll_back:\n self.cache_states()\n\n # increase count (local step)\n if self._training:\n self._count += 1\n\n # one step grdient descent\n loss_dict = self.one_step_descent()\n\n # lr scheduler step\n if self.scheduler is not None and not self._roll_back:\n self.scheduler.step()\n\n # logging\n if (\n self.log_step > 0\n and self._count % self.log_step == 0\n and self.is_rank_zero()\n ):\n self.log(loss_dict, global_step)\n\n # call parent step_normal after unrolling\n if (\n self._training\n and self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_normal(global_step=global_step)\n\n self._inner_loop_start = True\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step_after_roll_back(self):\n if self.check_ready() and self._training:\n if self._roll_back:\n # recover from cached states\n self.recover_states()\n\n # one step gradient step\n _ = self.one_step_descent(batch=self.cur_batch)\n\n # lr scheduler\n if self.scheduler is not None:\n self.scheduler.step()\n\n # call parent step_after_roll_back\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_after_roll_back()\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step(self, global_step=None):\n \"\"\"\n ``step`` method abstracts a one-step gradient descent update with four sub-steps:\n 1) data loading, 2) cost calculation, 3) gradient calculation, and 4) parameter update.\n It also calls upper-level problems' step methods after unrolling gradient steps based on\n the hierarchical dependency graph.\n\n :param global_step: global step of the whole multilevel optimization. 
Defaults to None.\n :type global_step: int, optional\n \"\"\"\n self._global_step = global_step\n self.step_normal(global_step=global_step)\n if (\n self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n self.step_after_roll_back()\n\n def get_batch(self):\n \"\"\"\n Load training batch from the user-provided data loader\n\n :return: New training batch\n :rtype: Any\n \"\"\"\n batch = tuple(\n self.get_batch_single_loader(i) for i in range(len(self.train_data_loader))\n )\n\n return batch[0] if len(batch) == 1 else batch\n\n def get_batch_single_loader(self, idx):\n \"\"\"\n Load training batch from one of the user-provided data loader(s)\n\n :return: New training batch\n :rtype: Any\n \"\"\"\n data_iterator = self.train_data_iterator[idx]\n try:\n batch = next(data_iterator)\n except StopIteration:\n if idx == 0:\n self.epoch_callback_exec()\n self.epoch_counter[idx] += 1\n train_data_loader = self.train_data_loader[idx]\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n train_data_loader.set_epoch(self.epoch_counter[idx])\n self.train_data_iterator[idx] = iter(train_data_loader)\n batch = next(self.train_data_iterator[idx])\n if not isinstance(batch, dict):\n batch = tuple(\n convert_tensor(value, self.device, self._is_default_fp16())\n for value in batch\n )\n else:\n for key, value in batch.items():\n batch[key] = convert_tensor(value, self.device, self._is_default_fp16())\n\n return batch\n\n def get_loss(self, batch):\n \"\"\"\n Calculate loss and log metrics for the current batch based on the user-defined loss\n function.\n\n :return: loss and log metrics (e.g. classification accuracy)\n :rtype: dict\n \"\"\"\n maybe_loss_dict = self.training_step_exec(batch)\n is_dict = isinstance(maybe_loss_dict, dict)\n loss = maybe_loss_dict[\"loss\"] if is_dict else maybe_loss_dict\n loss_no_scale = loss.item()\n if self._is_default_fp16():\n loss = self.scaler.scale(loss)\n loss = loss / self.gas\n\n # construct loss dict\n loss_dict = {\"loss\": loss_no_scale}\n if is_dict:\n for key, value in maybe_loss_dict.items():\n if key != \"loss\":\n loss_dict[key] = value\n\n return loss, loss_dict\n\n def backward(\n self,\n loss,\n params,\n paths,\n create_graph=False,\n retain_graph=True,\n allow_unused=True,\n ):\n \"\"\"\n Calculate the gradient of ``loss`` with respect to ``params`` based on a user-defined\n ``config``.\n\n :param loss: Outputs of the differentiated function.\n :type loss: Tensor\n :param params: Inputs with respect to which the gradient will be returned.\n :type params: Sequence of Tensor\n :param paths: Paths on which the gradient will be calculated.\n :type paths: List of list of Problem\n :param create_graph:\n If ``True``, graph of the derivative will be constructed, allowing to compute higher order\n derivative products. Default: ``True``.\n :type create_graph: bool, optional\n :param retain_graph:\n If ``False``, the graph used to compute the grad will be freed. Note that in nearly all\n cases setting this option to ``True`` is not needed and often can be worked around in a much\n more efficient way. Defaults to the value of ``create_graph``.\n :type retain_graph: bool, optional\n :param allow_unused:\n If ``False``, specifying inputs that were not used when computing outputs (and therefore\n their grad is always zero) is an error. 
Defaults to ``False``.\n :type allow_unused: bool, optional\n \"\"\"\n # direct grad\n if len(paths) > 0 or not self.gradient_accumulation_boundary():\n grads = torch.autograd.grad(\n loss,\n params,\n create_graph=create_graph,\n retain_graph=retain_graph,\n allow_unused=allow_unused,\n )\n self.set_grads(params, grads)\n else:\n torch.autograd.backward(\n loss,\n inputs=params,\n create_graph=create_graph,\n retain_graph=retain_graph,\n )\n\n # indirect grad: best-response Jacobian\n if self._config.first_order:\n for idx, path in enumerate(paths):\n retain_graph_implicit = False if idx == len(paths) - 1 else True\n do_sync = bool(\n idx == len(paths) - 1 and self.gradient_accumulation_boundary()\n )\n grads = get_grads(loss, path, retain_graph_implicit, do_sync)\n if not do_sync:\n self.set_grads(params, grads)\n\n def set_grads(self, params, grads):\n \"\"\"\n Set gradients for trainable parameters. ``params.grad = grads``\n\n :param params: Trainable parameters\n :type params: Sequence of Tensor\n :param grads: Calculated gradient\n :type grads: Sequence of Tensor\n \"\"\"\n for param, grad in zip(params, grads):\n if grad is not None:\n if hasattr(param, \"grad\") and param.grad is not None:\n param.grad = param.grad + grad\n else:\n param.grad = grad\n\n def synchronize_params(self, params):\n \"\"\"\n synchronize parameters across distributed data-parallel processes\n \"\"\"\n if self._world_size > 1 and self._strategy not in [\"fsdp\", \"accelerate\"]:\n for param in params:\n dist.broadcast(param.data, 0)\n\n @abc.abstractmethod\n def optimizer_step(self, *args, **kwargs):\n \"\"\"\n Update weights as in PyTorch's native ``optim.step()``\n \"\"\"\n raise NotImplementedError\n\n def zero_grad(self):\n \"\"\"\n Set gradients for trainable parameters for the current problem to 0.\n Similar with PyTorch's ``optim.zero_grad()`` or ``module.zero_grad()``.\n \"\"\"\n for param in list(self.trainable_parameters()):\n if hasattr(param, \"grad\"):\n del param.grad\n\n def clip_grad(self):\n \"\"\"\n Perform gradient clipping based on the norm provided by Config\n \"\"\"\n if self._strategy != \"fsdp\":\n torch.nn.utils.clip_grad_norm_(\n parameters=self.trainable_parameters(), max_norm=self.gradient_clipping\n )\n else:\n self.module.clip_grad_norm_(max_norm=self.gradient_clipping)\n\n def state_dict(self):\n \"\"\"\n Return all states involved in ``Problem`` with a Python dictionary. By default, it\n includes ``self.module.state_dict`` and ``self.optimizer.state_dict``. 
Depending on users'\n configurations, it may include ``self.scheuler.state_dict`` (lr scheduler) and\n ``self.scaler.state_dict`` (fp16 training)\n \"\"\"\n state_dict = {}\n state_dict[\"module\"] = self.module.state_dict()\n state_dict[\"optimizer\"] = self.optimizer.state_dict()\n if self.scheduler is not None:\n state_dict[\"scheduler\"] = self.scheduler.state_dict()\n if self._is_default_fp16():\n state_dict[\"scaler\"] = self.scaler.state_dict()\n\n return state_dict\n\n def load_state_dict(self, state_dict):\n \"\"\"Load the state for the ``Problem``\n\n Args:\n state_dict (dict): Python dictionary of Problem states.\n \"\"\"\n self.module.load_state_dict(state_dict[\"module\"])\n self.optimizer.load_state_dict(state_dict[\"optimizer\"])\n if self.scheduler is not None and \"scheduler\" in state_dict:\n self.scheduler.load_state_dict(state_dict[\"scheduler\"])\n if self._is_default_fp16() and \"scaler\" in state_dict:\n self.scaler.load_state_dict(state_dict[\"scaler\"])\n\n def configure_distributed_training(self, dictionary):\n \"\"\"\n Set the configuration for distributed training.\n\n :param dictionary: Python dictionary of distributed training provided by Engine.\n :type dictionary: dict\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/10", "ground_truth": " self._strategy = dictionary[\"strategy\"]\n self._backend = dictionary[\"backend\"]\n self._world_size = dictionary[\"world_size\"]\n self._rank = dictionary[\"rank\"]\n self._local_rank = dictionary[\"local_rank\"]\n", "fpath_tuple": ["leopard-ai_betty", "betty", "problems", "problem.py"], "context_start_lineno": 239, "lineno": 658, "function_name": "configure_distributed_training"}, "groundtruth": " self._strategy = dictionary[\"strategy\"]\n self._backend = dictionary[\"backend\"]\n self._world_size = dictionary[\"world_size\"]\n self._rank = dictionary[\"rank\"]\n self._local_rank = dictionary[\"local_rank\"]\n"} +{"prompt": "is_default_fp16():\n with torch.cuda.amp.autocast():\n return self.training_step(batch)\n else:\n return self.training_step(batch)\n\n def one_step_descent(self, batch=None):\n # load data\n if batch is None:\n self.cur_batch = self.get_batch()\n batch = self.cur_batch\n\n # calculate loss\n loss, loss_dict = self.get_loss(batch)\n\n # calculate gradient (a.k.a backward)\n self.backward(\n loss=loss,\n params=self.trainable_parameters(),\n paths=self._paths,\n create_graph=not self._first_order,\n retain_graph=self._retain_graph,\n allow_unused=self._allow_unused,\n )\n if self.is_implemented(\"grad_callback\"):\n self.grad_callback()\n\n # calculate parameter update\n if self._count % self.gas == 0:\n self.optimizer_step()\n\n # param callback (e.g., parameter clipping)\n if self.is_implemented(\"param_callback\"):\n self.param_callback()\n\n if self._strategy != \"default\" and self._count % (self.gas * 20) == 0:\n self.synchronize_params(self.trainable_parameters())\n\n # zero-out grad\n self.zero_grad()\n\n return loss_dict\n\n def step_normal(self, global_step=None):\n if self.check_ready():\n # loop start\n if self._inner_loop_start:\n if self.is_implemented(\"on_inner_loop_start\"):\n self.on_inner_loop_start()\n self._inner_loop_start = False\n\n # copy current parameters, buffers, optimizer states\n if self._roll_back:\n self.cache_states()\n\n # increase count (local step)\n if self._training:\n self._count += 1\n\n # one step grdient descent\n loss_dict = self.one_step_descent()\n\n # lr scheduler step\n if self.scheduler is not None and not self._roll_back:\n 
self.scheduler.step()\n\n # logging\n if (\n self.log_step > 0\n and self._count % self.log_step == 0\n and self.is_rank_zero()\n ):\n self.log(loss_dict, global_step)\n\n # call parent step_normal after unrolling\n if (\n self._training\n and self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_normal(global_step=global_step)\n\n self._inner_loop_start = True\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step_after_roll_back(self):\n if self.check_ready() and self._training:\n if self._roll_back:\n # recover from cached states\n self.recover_states()\n\n # one step gradient step\n _ = self.one_step_descent(batch=self.cur_batch)\n\n # lr scheduler\n if self.scheduler is not None:\n self.scheduler.step()\n\n # call parent step_after_roll_back\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_after_roll_back()\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step(self, global_step=None):\n \"\"\"\n ``step`` method abstracts a one-step gradient descent update with four sub-steps:\n 1) data loading, 2) cost calculation, 3) gradient calculation, and 4) parameter update.\n It also calls upper-level problems' step methods after unrolling gradient steps based on\n the hierarchical dependency graph.\n\n :param global_step: global step of the whole multilevel optimization. Defaults to None.\n :type global_step: int, optional\n \"\"\"\n self._global_step = global_step\n self.step_normal(global_step=global_step)\n if (\n self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n self.step_after_roll_back()\n\n def get_batch(self):\n \"\"\"\n Load training batch from the user-provided data loader\n\n :return: New training batch\n :rtype: Any\n \"\"\"\n batch = tuple(\n self.get_batch_single_loader(i) for i in range(len(self.train_data_loader))\n )\n\n return batch[0] if len(batch) == 1 else batch\n\n def get_batch_single_loader(self, idx):\n \"\"\"\n Load training batch from one of the user-provided data loader(s)\n\n :return: New training batch\n :rtype: Any\n \"\"\"\n data_iterator = self.train_data_iterator[idx]\n try:\n batch = next(data_iterator)\n except StopIteration:\n if idx == 0:\n self.epoch_callback_exec()\n self.epoch_counter[idx] += 1\n train_data_loader = self.train_data_loader[idx]\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n train_data_loader.set_epoch(self.epoch_counter[idx])\n self.train_data_iterator[idx] = iter(train_data_loader)\n batch = next(self.train_data_iterator[idx])\n if not isinstance(batch, dict):\n batch = tuple(\n convert_tensor(value, self.device, self._is_default_fp16())\n for value in batch\n )\n else:\n for key, value in batch.items():\n batch[key] = convert_tensor(value, self.device, self._is_default_fp16())\n\n return batch\n\n def get_loss(self, batch):\n \"\"\"\n Calculate loss and log metrics for the current batch based on the user-defined loss\n function.\n\n :return: loss and log metrics (e.g. 
classification accuracy)\n :rtype: dict\n \"\"\"\n maybe_loss_dict = self.training_step_exec(batch)\n is_dict = isinstance(maybe_loss_dict, dict)\n loss = maybe_loss_dict[\"loss\"] if is_dict else maybe_loss_dict\n loss_no_scale = loss.item()\n if self._is_default_fp16():\n loss = self.scaler.scale(loss)\n loss = loss / self.gas\n\n # construct loss dict\n loss_dict = {\"loss\": loss_no_scale}\n if is_dict:\n for key, value in maybe_loss_dict.items():\n if key != \"loss\":\n loss_dict[key] = value\n\n return loss, loss_dict\n\n def backward(\n self,\n loss,\n params,\n paths,\n create_graph=False,\n retain_graph=True,\n allow_unused=True,\n ):\n \"\"\"\n Calculate the gradient of ``loss`` with respect to ``params`` based on a user-defined\n ``config``.\n\n :param loss: Outputs of the differentiated function.\n :type loss: Tensor\n :param params: Inputs with respect to which the gradient will be returned.\n :type params: Sequence of Tensor\n :param paths: Paths on which the gradient will be calculated.\n :type paths: List of list of Problem\n :param create_graph:\n If ``True``, graph of the derivative will be constructed, allowing to compute higher order\n derivative products. Default: ``True``.\n :type create_graph: bool, optional\n :param retain_graph:\n If ``False``, the graph used to compute the grad will be freed. Note that in nearly all\n cases setting this option to ``True`` is not needed and often can be worked around in a much\n more efficient way. Defaults to the value of ``create_graph``.\n :type retain_graph: bool, optional\n :param allow_unused:\n If ``False``, specifying inputs that were not used when computing outputs (and therefore\n their grad is always zero) is an error. Defaults to ``False``.\n :type allow_unused: bool, optional\n \"\"\"\n # direct grad\n if len(paths) > 0 or not self.gradient_accumulation_boundary():\n grads = torch.autograd.grad(\n loss,\n params,\n create_graph=create_graph,\n retain_graph=retain_graph,\n allow_unused=allow_unused,\n )\n self.set_grads(params, grads)\n else:\n torch.autograd.backward(\n loss,\n inputs=params,\n create_graph=create_graph,\n retain_graph=retain_graph,\n )\n\n # indirect grad: best-response Jacobian\n if self._config.first_order:\n for idx, path in enumerate(paths):\n retain_graph_implicit = False if idx == len(paths) - 1 else True\n do_sync = bool(\n idx == len(paths) - 1 and self.gradient_accumulation_boundary()\n )\n grads = get_grads(loss, path, retain_graph_implicit, do_sync)\n if not do_sync:\n self.set_grads(params, grads)\n\n def set_grads(self, params, grads):\n \"\"\"\n Set gradients for trainable parameters. 
``params.grad = grads``\n\n :param params: Trainable parameters\n :type params: Sequence of Tensor\n :param grads: Calculated gradient\n :type grads: Sequence of Tensor\n \"\"\"\n for param, grad in zip(params, grads):\n if grad is not None:\n if hasattr(param, \"grad\") and param.grad is not None:\n param.grad = param.grad + grad\n else:\n param.grad = grad\n\n def synchronize_params(self, params):\n \"\"\"\n synchronize parameters across distributed data-parallel processes\n \"\"\"\n if self._world_size > 1 and self._strategy not in [\"fsdp\", \"accelerate\"]:\n for param in params:\n dist.broadcast(param.data, 0)\n\n @abc.abstractmethod\n def optimizer_step(self, *args, **kwargs):\n \"\"\"\n Update weights as in PyTorch's native ``optim.step()``\n \"\"\"\n raise NotImplementedError\n\n def zero_grad(self):\n \"\"\"\n Set gradients for trainable parameters for the current problem to 0.\n Similar with PyTorch's ``optim.zero_grad()`` or ``module.zero_grad()``.\n \"\"\"\n for param in list(self.trainable_parameters()):\n if hasattr(param, \"grad\"):\n del param.grad\n\n def clip_grad(self):\n \"\"\"\n Perform gradient clipping based on the norm provided by Config\n \"\"\"\n if self._strategy != \"fsdp\":\n torch.nn.utils.clip_grad_norm_(\n parameters=self.trainable_parameters(), max_norm=self.gradient_clipping\n )\n else:\n self.module.clip_grad_norm_(max_norm=self.gradient_clipping)\n\n def state_dict(self):\n \"\"\"\n Return all states involved in ``Problem`` with a Python dictionary. By default, it\n includes ``self.module.state_dict`` and ``self.optimizer.state_dict``. Depending on users'\n configurations, it may include ``self.scheuler.state_dict`` (lr scheduler) and\n ``self.scaler.state_dict`` (fp16 training)\n \"\"\"\n state_dict = {}\n state_dict[\"module\"] = self.module.state_dict()\n state_dict[\"optimizer\"] = self.optimizer.state_dict()\n if self.scheduler is not None:\n state_dict[\"scheduler\"] = self.scheduler.state_dict()\n if self._is_default_fp16():\n state_dict[\"scaler\"] = self.scaler.state_dict()\n\n return state_dict\n\n def load_state_dict(self, state_dict):\n \"\"\"Load the state for the ``Problem``\n\n Args:\n state_dict (dict): Python dictionary of Problem states.\n \"\"\"\n self.module.load_state_dict(state_dict[\"module\"])\n self.optimizer.load_state_dict(state_dict[\"optimizer\"])\n if self.scheduler is not None and \"scheduler\" in state_dict:\n self.scheduler.load_state_dict(state_dict[\"scheduler\"])\n if self._is_default_fp16() and \"scaler\" in state_dict:\n self.scaler.load_state_dict(state_dict[\"scaler\"])\n\n def configure_distributed_training(self, dictionary):\n \"\"\"\n Set the configuration for distributed training.\n\n :param dictionary: Python dictionary of distributed training provided by Engine.\n :type dictionary: dict\n \"\"\"\n self._strategy = dictionary[\"strategy\"]\n self._backend = dictionary[\"backend\"]\n self._world_size = dictionary[\"world_size\"]\n self._rank = dictionary[\"rank\"]\n self._local_rank = dictionary[\"local_rank\"]\n\n def configure_roll_back(self, roll_back):\n \"\"\"\n Set the roll-back (warm- start) option from Engine\n\n :param roll_back: roll-back (warm-start) on/off\n :type roll_back: bool\n \"\"\"\n if len(self._parents) > 0:\n self._roll_back = roll_back\n\n def configure_device(self, device):\n \"\"\"\n Set the device for the current problem.\n \"\"\"\n self.device = device\n\n def get_opt_param_group_for_param(self, param):\n \"\"\"\n Get optimizer param_group for specific parameter\n\n :param param: 
Parameter for which optimizer param_group is inquired\n :type param: torch.nn.Parameter\n :return: param_group for the given parameter\n :rtype: dict\n \"\"\"\n param_groups = self.optimizer.param_groups\n for group in param_groups:\n for p in group[\"params\"]:\n if param is p:\n return group\n\n def get_opt_state_for_param(self, param):\n \"\"\"\n Get optimizer state for specific parameter\n\n :param param: Parameter for which optimizer state is inquired\n :type param: torch.nn.Parameter\n :return: optimizer state for the given parameter\n :rtype: dict\n \"\"\"\n state = self.optimizer.state\n return state[param]\n\n @abc.abstractmethod\n def cache_states(self):\n \"\"\"\n Cache params, buffers, optimizer states when ``config.roll_back`` is set to ``True`` in\n ``step``.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def recover_states(self):\n \"\"\"\n Recover params, buffers, optimizer states when ``config.roll_back`` is set to ``True`` in\n ``step``.\n \"\"\"\n raise NotImplementedError\n\n def epoch_callback_exec(self):\n if self.is_implemented(\"epoch_callback\"):\n self.epoch_callback()\n\n def gradient_accumulation_boundary(self):\n \"\"\"\n Check whether the current step is on the gradient accumulation boundary\n \"\"\"\n return bool(self._count % self.gas == 0)\n\n def _is_default_fp16(self):\n \"\"\"\n Check whether to use PyTorch native fp16 (mixed-precision) feature\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/11", "ground_truth": " if not self._fp16 or self._strategy in [\"accelerate\"]:\n return False\n return True\n", "fpath_tuple": ["leopard-ai_betty", "betty", "problems", "problem.py"], "context_start_lineno": 312, "lineno": 737, "function_name": "_is_default_fp16"}, "groundtruth": " if not self._fp16 or self._strategy in [\"accelerate\"]:\n return False\n return True\n"} +{"prompt": " if self._count % self.gas == 0:\n self.optimizer_step()\n\n # param callback (e.g., parameter clipping)\n if self.is_implemented(\"param_callback\"):\n self.param_callback()\n\n if self._strategy != \"default\" and self._count % (self.gas * 20) == 0:\n self.synchronize_params(self.trainable_parameters())\n\n # zero-out grad\n self.zero_grad()\n\n return loss_dict\n\n def step_normal(self, global_step=None):\n if self.check_ready():\n # loop start\n if self._inner_loop_start:\n if self.is_implemented(\"on_inner_loop_start\"):\n self.on_inner_loop_start()\n self._inner_loop_start = False\n\n # copy current parameters, buffers, optimizer states\n if self._roll_back:\n self.cache_states()\n\n # increase count (local step)\n if self._training:\n self._count += 1\n\n # one step grdient descent\n loss_dict = self.one_step_descent()\n\n # lr scheduler step\n if self.scheduler is not None and not self._roll_back:\n self.scheduler.step()\n\n # logging\n if (\n self.log_step > 0\n and self._count % self.log_step == 0\n and self.is_rank_zero()\n ):\n self.log(loss_dict, global_step)\n\n # call parent step_normal after unrolling\n if (\n self._training\n and self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_normal(global_step=global_step)\n\n self._inner_loop_start = True\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step_after_roll_back(self):\n if self.check_ready() and self._training:\n if self._roll_back:\n # recover from cached states\n self.recover_states()\n\n # one step gradient step\n _ = 
self.one_step_descent(batch=self.cur_batch)\n\n # lr scheduler\n if self.scheduler is not None:\n self.scheduler.step()\n\n # call parent step_after_roll_back\n for problem in self._parents:\n idx = problem.children.index(self)\n problem.ready[idx] = True\n problem.step_after_roll_back()\n\n self.ready = [False for _ in range(len(self._children))]\n\n def step(self, global_step=None):\n \"\"\"\n ``step`` method abstracts a one-step gradient descent update with four sub-steps:\n 1) data loading, 2) cost calculation, 3) gradient calculation, and 4) parameter update.\n It also calls upper-level problems' step methods after unrolling gradient steps based on\n the hierarchical dependency graph.\n\n :param global_step: global step of the whole multilevel optimization. Defaults to None.\n :type global_step: int, optional\n \"\"\"\n self._global_step = global_step\n self.step_normal(global_step=global_step)\n if (\n self._count % (self._unroll_steps * self.gas) == 0\n and self._count > self.warmup_steps\n ):\n self.step_after_roll_back()\n\n def get_batch(self):\n \"\"\"\n Load training batch from the user-provided data loader\n\n :return: New training batch\n :rtype: Any\n \"\"\"\n batch = tuple(\n self.get_batch_single_loader(i) for i in range(len(self.train_data_loader))\n )\n\n return batch[0] if len(batch) == 1 else batch\n\n def get_batch_single_loader(self, idx):\n \"\"\"\n Load training batch from one of the user-provided data loader(s)\n\n :return: New training batch\n :rtype: Any\n \"\"\"\n data_iterator = self.train_data_iterator[idx]\n try:\n batch = next(data_iterator)\n except StopIteration:\n if idx == 0:\n self.epoch_callback_exec()\n self.epoch_counter[idx] += 1\n train_data_loader = self.train_data_loader[idx]\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n train_data_loader.set_epoch(self.epoch_counter[idx])\n self.train_data_iterator[idx] = iter(train_data_loader)\n batch = next(self.train_data_iterator[idx])\n if not isinstance(batch, dict):\n batch = tuple(\n convert_tensor(value, self.device, self._is_default_fp16())\n for value in batch\n )\n else:\n for key, value in batch.items():\n batch[key] = convert_tensor(value, self.device, self._is_default_fp16())\n\n return batch\n\n def get_loss(self, batch):\n \"\"\"\n Calculate loss and log metrics for the current batch based on the user-defined loss\n function.\n\n :return: loss and log metrics (e.g. 
classification accuracy)\n :rtype: dict\n \"\"\"\n maybe_loss_dict = self.training_step_exec(batch)\n is_dict = isinstance(maybe_loss_dict, dict)\n loss = maybe_loss_dict[\"loss\"] if is_dict else maybe_loss_dict\n loss_no_scale = loss.item()\n if self._is_default_fp16():\n loss = self.scaler.scale(loss)\n loss = loss / self.gas\n\n # construct loss dict\n loss_dict = {\"loss\": loss_no_scale}\n if is_dict:\n for key, value in maybe_loss_dict.items():\n if key != \"loss\":\n loss_dict[key] = value\n\n return loss, loss_dict\n\n def backward(\n self,\n loss,\n params,\n paths,\n create_graph=False,\n retain_graph=True,\n allow_unused=True,\n ):\n \"\"\"\n Calculate the gradient of ``loss`` with respect to ``params`` based on a user-defined\n ``config``.\n\n :param loss: Outputs of the differentiated function.\n :type loss: Tensor\n :param params: Inputs with respect to which the gradient will be returned.\n :type params: Sequence of Tensor\n :param paths: Paths on which the gradient will be calculated.\n :type paths: List of list of Problem\n :param create_graph:\n If ``True``, graph of the derivative will be constructed, allowing to compute higher order\n derivative products. Default: ``True``.\n :type create_graph: bool, optional\n :param retain_graph:\n If ``False``, the graph used to compute the grad will be freed. Note that in nearly all\n cases setting this option to ``True`` is not needed and often can be worked around in a much\n more efficient way. Defaults to the value of ``create_graph``.\n :type retain_graph: bool, optional\n :param allow_unused:\n If ``False``, specifying inputs that were not used when computing outputs (and therefore\n their grad is always zero) is an error. Defaults to ``False``.\n :type allow_unused: bool, optional\n \"\"\"\n # direct grad\n if len(paths) > 0 or not self.gradient_accumulation_boundary():\n grads = torch.autograd.grad(\n loss,\n params,\n create_graph=create_graph,\n retain_graph=retain_graph,\n allow_unused=allow_unused,\n )\n self.set_grads(params, grads)\n else:\n torch.autograd.backward(\n loss,\n inputs=params,\n create_graph=create_graph,\n retain_graph=retain_graph,\n )\n\n # indirect grad: best-response Jacobian\n if self._config.first_order:\n for idx, path in enumerate(paths):\n retain_graph_implicit = False if idx == len(paths) - 1 else True\n do_sync = bool(\n idx == len(paths) - 1 and self.gradient_accumulation_boundary()\n )\n grads = get_grads(loss, path, retain_graph_implicit, do_sync)\n if not do_sync:\n self.set_grads(params, grads)\n\n def set_grads(self, params, grads):\n \"\"\"\n Set gradients for trainable parameters. 
``params.grad = grads``\n\n :param params: Trainable parameters\n :type params: Sequence of Tensor\n :param grads: Calculated gradient\n :type grads: Sequence of Tensor\n \"\"\"\n for param, grad in zip(params, grads):\n if grad is not None:\n if hasattr(param, \"grad\") and param.grad is not None:\n param.grad = param.grad + grad\n else:\n param.grad = grad\n\n def synchronize_params(self, params):\n \"\"\"\n synchronize parameters across distributed data-parallel processes\n \"\"\"\n if self._world_size > 1 and self._strategy not in [\"fsdp\", \"accelerate\"]:\n for param in params:\n dist.broadcast(param.data, 0)\n\n @abc.abstractmethod\n def optimizer_step(self, *args, **kwargs):\n \"\"\"\n Update weights as in PyTorch's native ``optim.step()``\n \"\"\"\n raise NotImplementedError\n\n def zero_grad(self):\n \"\"\"\n Set gradients for trainable parameters for the current problem to 0.\n Similar with PyTorch's ``optim.zero_grad()`` or ``module.zero_grad()``.\n \"\"\"\n for param in list(self.trainable_parameters()):\n if hasattr(param, \"grad\"):\n del param.grad\n\n def clip_grad(self):\n \"\"\"\n Perform gradient clipping based on the norm provided by Config\n \"\"\"\n if self._strategy != \"fsdp\":\n torch.nn.utils.clip_grad_norm_(\n parameters=self.trainable_parameters(), max_norm=self.gradient_clipping\n )\n else:\n self.module.clip_grad_norm_(max_norm=self.gradient_clipping)\n\n def state_dict(self):\n \"\"\"\n Return all states involved in ``Problem`` with a Python dictionary. By default, it\n includes ``self.module.state_dict`` and ``self.optimizer.state_dict``. Depending on users'\n configurations, it may include ``self.scheuler.state_dict`` (lr scheduler) and\n ``self.scaler.state_dict`` (fp16 training)\n \"\"\"\n state_dict = {}\n state_dict[\"module\"] = self.module.state_dict()\n state_dict[\"optimizer\"] = self.optimizer.state_dict()\n if self.scheduler is not None:\n state_dict[\"scheduler\"] = self.scheduler.state_dict()\n if self._is_default_fp16():\n state_dict[\"scaler\"] = self.scaler.state_dict()\n\n return state_dict\n\n def load_state_dict(self, state_dict):\n \"\"\"Load the state for the ``Problem``\n\n Args:\n state_dict (dict): Python dictionary of Problem states.\n \"\"\"\n self.module.load_state_dict(state_dict[\"module\"])\n self.optimizer.load_state_dict(state_dict[\"optimizer\"])\n if self.scheduler is not None and \"scheduler\" in state_dict:\n self.scheduler.load_state_dict(state_dict[\"scheduler\"])\n if self._is_default_fp16() and \"scaler\" in state_dict:\n self.scaler.load_state_dict(state_dict[\"scaler\"])\n\n def configure_distributed_training(self, dictionary):\n \"\"\"\n Set the configuration for distributed training.\n\n :param dictionary: Python dictionary of distributed training provided by Engine.\n :type dictionary: dict\n \"\"\"\n self._strategy = dictionary[\"strategy\"]\n self._backend = dictionary[\"backend\"]\n self._world_size = dictionary[\"world_size\"]\n self._rank = dictionary[\"rank\"]\n self._local_rank = dictionary[\"local_rank\"]\n\n def configure_roll_back(self, roll_back):\n \"\"\"\n Set the roll-back (warm- start) option from Engine\n\n :param roll_back: roll-back (warm-start) on/off\n :type roll_back: bool\n \"\"\"\n if len(self._parents) > 0:\n self._roll_back = roll_back\n\n def configure_device(self, device):\n \"\"\"\n Set the device for the current problem.\n \"\"\"\n self.device = device\n\n def get_opt_param_group_for_param(self, param):\n \"\"\"\n Get optimizer param_group for specific parameter\n\n :param param: 
Parameter for which optimizer param_group is inquired\n :type param: torch.nn.Parameter\n :return: param_group for the given parameter\n :rtype: dict\n \"\"\"\n param_groups = self.optimizer.param_groups\n for group in param_groups:\n for p in group[\"params\"]:\n if param is p:\n return group\n\n def get_opt_state_for_param(self, param):\n \"\"\"\n Get optimizer state for specific parameter\n\n :param param: Parameter for which optimizer state is inquired\n :type param: torch.nn.Parameter\n :return: optimizer state for the given parameter\n :rtype: dict\n \"\"\"\n state = self.optimizer.state\n return state[param]\n\n @abc.abstractmethod\n def cache_states(self):\n \"\"\"\n Cache params, buffers, optimizer states when ``config.roll_back`` is set to ``True`` in\n ``step``.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def recover_states(self):\n \"\"\"\n Recover params, buffers, optimizer states when ``config.roll_back`` is set to ``True`` in\n ``step``.\n \"\"\"\n raise NotImplementedError\n\n def epoch_callback_exec(self):\n if self.is_implemented(\"epoch_callback\"):\n self.epoch_callback()\n\n def gradient_accumulation_boundary(self):\n \"\"\"\n Check whether the current step is on the gradient accumulation boundary\n \"\"\"\n return bool(self._count % self.gas == 0)\n\n def _is_default_fp16(self):\n \"\"\"\n Check whether to use PyTorch native fp16 (mixed-precision) feature\n \"\"\"\n if not self._fp16 or self._strategy in [\"accelerate\"]:\n return False\n return True\n\n def is_implemented(self, fn_name):\n \"\"\"\n Check if ``fn_name`` method is implemented in the class\n\n :rtype: bool\n \"\"\"\n return callable(getattr(self, fn_name, None))\n\n def check_ready(self):\n \"\"\"\n Check if unrolling processes of lower level problems in the hierarchical dependency\n graph are all ready/done. 
``step`` function is only excuted when this method returns\n ``True``.\n\n :rtype: bool\n \"\"\"\n return all(self.ready)\n\n def log(self, stats, global_step):\n \"\"\"\n Log (training) stats to the ``self.logger``\n\n :param stats: log metrics such as loss and classification accuracy.\n :type stats: Any\n :param step: global/local step associated with the ``stats``.\n :type step: int\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/12", "ground_truth": " loss_log = log_from_loss_dict(stats)\n if global_step is None:\n self.logger.info(\n f'[Problem \"{self._name}\"] [Local Step {self._count}] {loss_log}'\n )\n else:\n self.logger.info(\n f'[Problem \"{self._name}\"] [Global Step {global_step}] [Local Step {self._count}] '\n f\"{loss_log}\"\n )\n cur_step = global_step\n if global_step is None or self.log_local_step:\n cur_step = self._count\n self.logger.log(stats, tag=self._name, step=cur_step)\n", "fpath_tuple": ["leopard-ai_betty", "betty", "problems", "problem.py"], "context_start_lineno": 340, "lineno": 768, "function_name": "log"}, "groundtruth": " loss_log = log_from_loss_dict(stats)\n if global_step is None:\n self.logger.info(\n f'[Problem \"{self._name}\"] [Local Step {self._count}] {loss_log}'\n )\n else:\n self.logger.info(\n f'[Problem \"{self._name}\"] [Global Step {global_step}] [Local Step {self._count}] '\n f\"{loss_log}\"\n )\n cur_step = global_step\n if global_step is None or self.log_local_step:\n cur_step = self._count\n self.logger.log(stats, tag=self._name, step=cur_step)\n"} +{"prompt": "# Copyright Sang Keun Choe\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom betty.problems import Problem\n\n\n# pylint: disable=W0223\nclass ImplicitProblem(Problem):\n \"\"\"\n ``ImplicitProblem`` is sublassed from ``Problem``.\n \"\"\"\n\n def __init__(\n self,\n name,\n config,\n module=None,\n optimizer=None,\n scheduler=None,\n train_data_loader=None,\n extra_config=None,\n ):", "metadata": {"task_id": "leopard-ai--betty/13", "ground_truth": " super().__init__(\n name,\n config,\n module,\n optimizer,\n scheduler,\n train_data_loader,\n extra_config,\n )\n self.module_state_dict_cache = None\n self.opitmizer_state_dict_cache = None\n", "fpath_tuple": ["leopard-ai_betty", "betty", "problems", "implicit_problem.py"], "context_start_lineno": 0, "lineno": 24, "function_name": "__init__"}, "groundtruth": " super().__init__(\n name,\n config,\n module,\n optimizer,\n scheduler,\n train_data_loader,\n extra_config,\n )\n self.module_state_dict_cache = None\n self.opitmizer_state_dict_cache = None\n"} +{"prompt": "import torch\n\n\ndef convert_tensor(item, device=None, fp16=False):", "metadata": {"task_id": "leopard-ai--betty/14", "ground_truth": " if not isinstance(item, torch.Tensor):\n return item\n return item.to(device)\n", "fpath_tuple": ["leopard-ai_betty", "betty", "utils.py"], "context_start_lineno": 0, "lineno": 4, "function_name": "convert_tensor"}, "groundtruth": " if not isinstance(item, torch.Tensor):\n return item\n return item.to(device)\n"} +{"prompt": "import torch\n\n\ndef convert_tensor(item, device=None, fp16=False):\n if not isinstance(item, torch.Tensor):\n return item\n return item.to(device)\n\n\ndef get_grad_norm(parameters):\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n\n total_norm = 0.0\n for p in parameters:\n param_norm = p.grad.data.float().norm()\n total_norm += 
param_norm.item() ** 2\n\n if (\n total_norm == float(\"inf\")\n or total_norm == -float(\"inf\")\n or total_norm != total_norm\n ):\n total_norm = -1\n\n return total_norm\n\n\ndef get_weight_norm(parameters):\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n\n total_norm = 0.0\n for p in parameters:\n param_norm = torch.norm(p, dtype=torch.float32)\n total_norm += param_norm.item() ** 2\n\n if (\n total_norm == float(\"inf\")\n or total_norm == -float(\"inf\")\n or total_norm != total_norm\n ):\n total_norm = -1\n\n return total_norm\n\n\ndef flatten_list(regular_list):\n \"\"\"[summary]\n Flatten list of lists\n \"\"\"\n if type(regular_list[0] == list):\n return [item for sublist in regular_list for item in sublist]\n return regular_list\n\n\ndef get_param_index(param, param_list):\n param_list = list(param_list)\n for idx, p in enumerate(param_list):\n if p is param:\n return idx\n print(\"no corresponding parameter found!\")\n\n\ndef get_multiplier(problem):\n if problem.leaf:\n return 1\n\n assert len(problem.children) > 0\n # stack to store all the nodes of tree\n s1 = []\n # stack to store all the leaf nodes\n s2 = []\n\n s1.append((problem, 1))\n while len(s1) != 0:\n curr, multiplier = s1.pop(0)\n\n if len(curr.children) != 0:\n for child in curr.children:\n s1.append((child, multiplier * curr.config.step))\n else:\n s2.append(multiplier)\n\n assert all(x == s2[0] for x in s2)\n return s2[0]\n\n\ndef log_from_loss_dict(loss_dict):", "metadata": {"task_id": "leopard-ai--betty/15", "ground_truth": " outputs = []\n for key, values in loss_dict.items():\n if isinstance(values, dict) or isinstance(values, list):\n for value_idx, value in enumerate(values):\n full_key = key + \"_\" + str(value_idx)\n if torch.is_tensor(value):\n value = value.item()\n output = f\"{full_key}: {value}\"\n outputs.append(output)\n else:\n if torch.is_tensor(values):\n values = values.item()\n output = f\"{key}: {values}\"\n outputs.append(output)\n return \" || \".join(outputs)\n", "fpath_tuple": ["leopard-ai_betty", "betty", "utils.py"], "context_start_lineno": 0, "lineno": 90, "function_name": "log_from_loss_dict"}, "groundtruth": " outputs = []\n for key, values in loss_dict.items():\n if isinstance(values, dict) or isinstance(values, list):\n for value_idx, value in enumerate(values):\n full_key = key + \"_\" + str(value_idx)\n if torch.is_tensor(value):\n value = value.item()\n output = f\"{full_key}: {value}\"\n outputs.append(output)\n else:\n if torch.is_tensor(values):\n values = values.item()\n output = f\"{key}: {values}\"\n outputs.append(output)\n return \" || \".join(outputs)\n"} +{"prompt": "import torch\n\n\ndef convert_tensor(item, device=None, fp16=False):\n if not isinstance(item, torch.Tensor):\n return item\n return item.to(device)\n\n\ndef get_grad_norm(parameters):\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n\n total_norm = 0.0\n for p in parameters:\n param_norm = p.grad.data.float().norm()\n total_norm += param_norm.item() ** 2\n\n if (\n total_norm == float(\"inf\")\n or total_norm == -float(\"inf\")\n or total_norm != total_norm\n ):\n total_norm = -1\n\n return total_norm\n\n\ndef get_weight_norm(parameters):\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n\n total_norm = 0.0\n for p in parameters:\n param_norm = torch.norm(p, dtype=torch.float32)\n total_norm += param_norm.item() ** 2\n\n if (\n total_norm == float(\"inf\")\n or 
total_norm == -float(\"inf\")\n or total_norm != total_norm\n ):\n total_norm = -1\n\n return total_norm\n\n\ndef flatten_list(regular_list):\n \"\"\"[summary]\n Flatten list of lists\n \"\"\"\n if type(regular_list[0] == list):\n return [item for sublist in regular_list for item in sublist]\n return regular_list\n\n\ndef get_param_index(param, param_list):\n param_list = list(param_list)\n for idx, p in enumerate(param_list):\n if p is param:\n return idx\n print(\"no corresponding parameter found!\")\n\n\ndef get_multiplier(problem):\n if problem.leaf:\n return 1\n\n assert len(problem.children) > 0\n # stack to store all the nodes of tree\n s1 = []\n # stack to store all the leaf nodes\n s2 = []\n\n s1.append((problem, 1))\n while len(s1) != 0:\n curr, multiplier = s1.pop(0)\n\n if len(curr.children) != 0:\n for child in curr.children:\n s1.append((child, multiplier * curr.config.step))\n else:\n s2.append(multiplier)\n\n assert all(x == s2[0] for x in s2)\n return s2[0]\n\n\ndef log_from_loss_dict(loss_dict):\n outputs = []\n for key, values in loss_dict.items():\n if isinstance(values, dict) or isinstance(values, list):\n for value_idx, value in enumerate(values):\n full_key = key + \"_\" + str(value_idx)\n if torch.is_tensor(value):\n value = value.item()\n output = f\"{full_key}: {value}\"\n outputs.append(output)\n else:\n if torch.is_tensor(values):\n values = values.item()\n output = f\"{key}: {values}\"\n outputs.append(output)\n return \" || \".join(outputs)\n\n\ndef to_vec(tensor_list, alpha=1.0):\n return torch.cat([alpha * t.reshape(-1) for t in tensor_list])\n\n\ndef count_parameters(tensor_list):\n return sum([tensor.numel() for tensor in tensor_list])\n\n\ndef neg_with_none(a):", "metadata": {"task_id": "leopard-ai--betty/16", "ground_truth": " if a is None:\n return None\n else:\n return -a\n", "fpath_tuple": ["leopard-ai_betty", "betty", "utils.py"], "context_start_lineno": 0, "lineno": 116, "function_name": "neg_with_none"}, "groundtruth": " if a is None:\n return None\n else:\n return -a\n"} +{"prompt": "import torch\n\n\ndef convert_tensor(item, device=None, fp16=False):\n if not isinstance(item, torch.Tensor):\n return item\n return item.to(device)\n\n\ndef get_grad_norm(parameters):\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n parameters = list(filter(lambda p: p.grad is not None, parameters))\n\n total_norm = 0.0\n for p in parameters:\n param_norm = p.grad.data.float().norm()\n total_norm += param_norm.item() ** 2\n\n if (\n total_norm == float(\"inf\")\n or total_norm == -float(\"inf\")\n or total_norm != total_norm\n ):\n total_norm = -1\n\n return total_norm\n\n\ndef get_weight_norm(parameters):\n if isinstance(parameters, torch.Tensor):\n parameters = [parameters]\n\n total_norm = 0.0\n for p in parameters:\n param_norm = torch.norm(p, dtype=torch.float32)\n total_norm += param_norm.item() ** 2\n\n if (\n total_norm == float(\"inf\")\n or total_norm == -float(\"inf\")\n or total_norm != total_norm\n ):\n total_norm = -1\n\n return total_norm\n\n\ndef flatten_list(regular_list):\n \"\"\"[summary]\n Flatten list of lists\n \"\"\"\n if type(regular_list[0] == list):\n return [item for sublist in regular_list for item in sublist]\n return regular_list\n\n\ndef get_param_index(param, param_list):\n param_list = list(param_list)\n for idx, p in enumerate(param_list):\n if p is param:\n return idx\n print(\"no corresponding parameter found!\")\n\n\ndef get_multiplier(problem):\n if problem.leaf:\n return 1\n\n assert len(problem.children) 
> 0\n # stack to store all the nodes of tree\n s1 = []\n # stack to store all the leaf nodes\n s2 = []\n\n s1.append((problem, 1))\n while len(s1) != 0:\n curr, multiplier = s1.pop(0)\n\n if len(curr.children) != 0:\n for child in curr.children:\n s1.append((child, multiplier * curr.config.step))\n else:\n s2.append(multiplier)\n\n assert all(x == s2[0] for x in s2)\n return s2[0]\n\n\ndef log_from_loss_dict(loss_dict):\n outputs = []\n for key, values in loss_dict.items():\n if isinstance(values, dict) or isinstance(values, list):\n for value_idx, value in enumerate(values):\n full_key = key + \"_\" + str(value_idx)\n if torch.is_tensor(value):\n value = value.item()\n output = f\"{full_key}: {value}\"\n outputs.append(output)\n else:\n if torch.is_tensor(values):\n values = values.item()\n output = f\"{key}: {values}\"\n outputs.append(output)\n return \" || \".join(outputs)\n\n\ndef to_vec(tensor_list, alpha=1.0):\n return torch.cat([alpha * t.reshape(-1) for t in tensor_list])\n\n\ndef count_parameters(tensor_list):\n return sum([tensor.numel() for tensor in tensor_list])\n\n\ndef neg_with_none(a):\n if a is None:\n return None\n else:\n return -a\n\n\ndef replace_none_with_zero(tensor_list, reference):", "metadata": {"task_id": "leopard-ai--betty/17", "ground_truth": " out = []\n for t, r in zip(tensor_list, reference):\n fixed = t if t is not None else torch.zeros_like(r)\n out.append(fixed)\n return tuple(out)\n", "fpath_tuple": ["leopard-ai_betty", "betty", "utils.py"], "context_start_lineno": 0, "lineno": 123, "function_name": "replace_none_with_zero"}, "groundtruth": " out = []\n for t, r in zip(tensor_list, reference):\n fixed = t if t is not None else torch.zeros_like(r)\n out.append(fixed)\n return tuple(out)\n"} +{"prompt": "# Copyright Sang Keun Choe\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport time\n\nimport torch\nimport torch.distributed as dist\n\nfrom betty.configs import EngineConfig\nfrom betty.logging import logger\nfrom betty.misc.early_stopping import EarlyStopping\nfrom betty.utils import log_from_loss_dict\n\n\nclass Engine:\n \"\"\"\n ``Engine`` handles a dataflow graph based on the user-provided hierarchical problem\n dependencies. 
It also provides a primitive for executing multilevel optimization.\n \"\"\"\n\n def __init__(self, problems, config=None, dependencies=None, env=None):\n # config\n self.config = config if config is not None else EngineConfig()\n\n # step counters\n self.train_iters = 0\n self.valid_step = 0\n self.global_step = 0\n\n # logger\n self.logger_type = None\n self.logger = None\n\n # problem\n self.problems = problems\n self.leaves = []\n\n # dependencies\n self.dependencies = dependencies\n\n # env\n self.env = env\n\n # distributed\n self._strategy = None\n self._backend = None\n self._world_size = 0\n self._rank = 0\n self._local_rank = 0\n\n # early stopping\n self.early_stopping = None\n\n # roll back\n self._roll_back = False\n\n # device\n self.device = None\n\n # initialize\n self.initialize()\n\n def parse_config(self):\n \"\"\"\n Parse EngineConfig.\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/18", "ground_truth": " self.train_iters = self.config.train_iters\n self.valid_step = self.config.valid_step\n\n self.logger_type = self.config.logger_type\n\n self._roll_back = self.config.roll_back\n\n self._strategy = self.config.strategy\n self._backend = self.config.backend\n\n if self.config.early_stopping:\n self.early_stopping = EarlyStopping(\n metric=self.config.early_stopping_metric,\n mode=self.config.early_stopping_mode,\n tolerance=self.config.early_stopping_tolerance,\n )\n", "fpath_tuple": ["leopard-ai_betty", "betty", "engine.py"], "context_start_lineno": 0, "lineno": 68, "function_name": "parse_config"}, "groundtruth": " self.train_iters = self.config.train_iters\n self.valid_step = self.config.valid_step\n\n self.logger_type = self.config.logger_type\n\n self._roll_back = self.config.roll_back\n\n self._strategy = self.config.strategy\n self._backend = self.config.backend\n\n if self.config.early_stopping:\n self.early_stopping = EarlyStopping(\n metric=self.config.early_stopping_metric,\n mode=self.config.early_stopping_mode,\n tolerance=self.config.early_stopping_tolerance,\n )\n"} +{"prompt": "# Copyright Sang Keun Choe\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport time\n\nimport torch\nimport torch.distributed as dist\n\nfrom betty.configs import EngineConfig\nfrom betty.logging import logger\nfrom betty.misc.early_stopping import EarlyStopping\nfrom betty.utils import log_from_loss_dict\n\n\nclass Engine:\n \"\"\"\n ``Engine`` handles a dataflow graph based on the user-provided hierarchical problem\n dependencies. 
It also provides a primitive for executing multilevel optimization.\n \"\"\"\n\n def __init__(self, problems, config=None, dependencies=None, env=None):\n # config\n self.config = config if config is not None else EngineConfig()\n\n # step counters\n self.train_iters = 0\n self.valid_step = 0\n self.global_step = 0\n\n # logger\n self.logger_type = None\n self.logger = None\n\n # problem\n self.problems = problems\n self.leaves = []\n\n # dependencies\n self.dependencies = dependencies\n\n # env\n self.env = env\n\n # distributed\n self._strategy = None\n self._backend = None\n self._world_size = 0\n self._rank = 0\n self._local_rank = 0\n\n # early stopping\n self.early_stopping = None\n\n # roll back\n self._roll_back = False\n\n # device\n self.device = None\n\n # initialize\n self.initialize()\n\n def parse_config(self):\n \"\"\"\n Parse EngineConfig.\n \"\"\"\n self.train_iters = self.config.train_iters\n self.valid_step = self.config.valid_step\n\n self.logger_type = self.config.logger_type\n\n self._roll_back = self.config.roll_back\n\n self._strategy = self.config.strategy\n self._backend = self.config.backend\n\n if self.config.early_stopping:\n self.early_stopping = EarlyStopping(\n metric=self.config.early_stopping_metric,\n mode=self.config.early_stopping_mode,\n tolerance=self.config.early_stopping_tolerance,\n )\n\n def train_step(self):\n \"\"\"\n Running one-step gradient descent for all leaf problems.\n \"\"\"\n for leaf in self.leaves:\n leaf.step(global_step=self.global_step)\n\n def run(self):\n \"\"\"\n Execute multilevel optimization by running gradient descent for leaf problems.\n \"\"\"\n self.train()\n for it in range(1, self.train_iters + 1):\n self.global_step += 1\n self.train_step()\n\n if it % self.valid_step == 0 and self.do_validation():\n self.eval()\n validation_stats = self.validation() or {}\n log_loss = log_from_loss_dict(validation_stats)\n self.logger.info(\n f\"[Validation] [Global Step {self.global_step}] \" f\"{log_loss}\"\n )\n self.logger.log(\n validation_stats, tag=\"validation\", step=self.global_step\n )\n self.train()\n\n # early stopping\n if self.early_stopping is not None:\n stop = self.early_stopping(validation_stats)\n if stop:\n self.logger.info(\"Early stopping is executed!\")\n break\n\n def initialize(self):\n \"\"\"\n Initialize dependencies (computational graph) between problems.\n \"\"\"\n # Parse config\n self.parse_config()\n\n # initialize distributed training\n dist_dict = self.configure_systems()\n\n # initialize logger\n self.logger = logger(logger_type=self.logger_type)\n if self.is_rank_zero():\n self.logger.info(\"Initializing Multilevel Optimization...\\n\")\n start = time.time()\n\n # parse problem dependency\n self.parse_dependency()\n\n # set problem attributes\n for problem in self.problems:\n self.set_problem_attr(problem)\n\n # env initialization\n if self.env is not None:\n self.env.configure_distributed_training(dist_dict)\n self.env.configure_device(self.device)\n self.env.initialize()\n\n # problem initialization\n for problem in self.problems:\n problem.add_logger(self.logger)\n problem.configure_distributed_training(dist_dict)\n problem.configure_device(self.device)\n problem.configure_roll_back(self._roll_back)\n problem.initialize()\n if self.env is not None:\n problem.add_env(self.env)\n\n end = time.time()\n if self.is_rank_zero():\n self.logger.info(f\"Time spent on initialization: {end-start:.3f} (s)\\n\")\n\n def configure_systems(self):\n \"\"\"\n Configure basic systems set-up like distributed 
training and device placement.\n \"\"\"\n # configure distributed training\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n dist.init_process_group(backend=self._backend)\n\n self._world_size = dist.get_world_size()\n assert self._world_size > 1\n self._rank = dist.get_rank()\n\n device_count = torch.cuda.device_count()\n self._local_rank = self._rank % device_count\n\n dist_dict = {}\n dist_dict[\"strategy\"] = self._strategy\n dist_dict[\"backend\"] = self._backend\n dist_dict[\"world_size\"] = self._world_size\n dist_dict[\"rank\"] = self._rank\n dist_dict[\"local_rank\"] = self._local_rank\n\n # configure device for the current rank\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n torch.cuda.set_device(self._local_rank)\n self.device = torch.device(\"cuda\", self._local_rank)\n elif self._strategy == \"accelerate\":\n self.device = self.accelerator.device\n elif self._strategy == \"cpu\":\n self.device = \"cpu\"\n elif self._strategy == \"gpu\":\n self.device = \"cuda\"\n else:\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n return dist_dict\n\n def train(self):\n \"\"\"\n Set all problems in multilevel optimization to the train mode.\n \"\"\"\n for problem in self.problems:\n problem.train()\n\n def eval(self):\n \"\"\"\n Set all problems in multilevel optimization to the eval mode.\n \"\"\"\n for problem in self.problems:\n problem.eval()\n\n def check_leaf(self, problem):\n \"\"\"\n Check whether the given ``problem`` is a leaf problem or not.\n\n :param problem: Problem in multilevel optimization\n :type problem: Problem\n :return: True or False\n :rtype: bool\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/19", "ground_truth": " for _, value_list in self.dependencies[\"l2u\"].items():\n if problem in set(value_list):\n return False\n\n return True\n", "fpath_tuple": ["leopard-ai_betty", "betty", "engine.py"], "context_start_lineno": 0, "lineno": 223, "function_name": "check_leaf"}, "groundtruth": " for _, value_list in self.dependencies[\"l2u\"].items():\n if problem in set(value_list):\n return False\n\n return True\n"} +{"prompt": "# Copyright Sang Keun Choe\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport time\n\nimport torch\nimport torch.distributed as dist\n\nfrom betty.configs import EngineConfig\nfrom betty.logging import logger\nfrom betty.misc.early_stopping import EarlyStopping\nfrom betty.utils import log_from_loss_dict\n\n\nclass Engine:\n \"\"\"\n ``Engine`` handles a dataflow graph based on the user-provided hierarchical problem\n dependencies. 
It also provides a primitive for executing multilevel optimization.\n \"\"\"\n\n def __init__(self, problems, config=None, dependencies=None, env=None):\n # config\n self.config = config if config is not None else EngineConfig()\n\n # step counters\n self.train_iters = 0\n self.valid_step = 0\n self.global_step = 0\n\n # logger\n self.logger_type = None\n self.logger = None\n\n # problem\n self.problems = problems\n self.leaves = []\n\n # dependencies\n self.dependencies = dependencies\n\n # env\n self.env = env\n\n # distributed\n self._strategy = None\n self._backend = None\n self._world_size = 0\n self._rank = 0\n self._local_rank = 0\n\n # early stopping\n self.early_stopping = None\n\n # roll back\n self._roll_back = False\n\n # device\n self.device = None\n\n # initialize\n self.initialize()\n\n def parse_config(self):\n \"\"\"\n Parse EngineConfig.\n \"\"\"\n self.train_iters = self.config.train_iters\n self.valid_step = self.config.valid_step\n\n self.logger_type = self.config.logger_type\n\n self._roll_back = self.config.roll_back\n\n self._strategy = self.config.strategy\n self._backend = self.config.backend\n\n if self.config.early_stopping:\n self.early_stopping = EarlyStopping(\n metric=self.config.early_stopping_metric,\n mode=self.config.early_stopping_mode,\n tolerance=self.config.early_stopping_tolerance,\n )\n\n def train_step(self):\n \"\"\"\n Running one-step gradient descent for all leaf problems.\n \"\"\"\n for leaf in self.leaves:\n leaf.step(global_step=self.global_step)\n\n def run(self):\n \"\"\"\n Execute multilevel optimization by running gradient descent for leaf problems.\n \"\"\"\n self.train()\n for it in range(1, self.train_iters + 1):\n self.global_step += 1\n self.train_step()\n\n if it % self.valid_step == 0 and self.do_validation():\n self.eval()\n validation_stats = self.validation() or {}\n log_loss = log_from_loss_dict(validation_stats)\n self.logger.info(\n f\"[Validation] [Global Step {self.global_step}] \" f\"{log_loss}\"\n )\n self.logger.log(\n validation_stats, tag=\"validation\", step=self.global_step\n )\n self.train()\n\n # early stopping\n if self.early_stopping is not None:\n stop = self.early_stopping(validation_stats)\n if stop:\n self.logger.info(\"Early stopping is executed!\")\n break\n\n def initialize(self):\n \"\"\"\n Initialize dependencies (computational graph) between problems.\n \"\"\"\n # Parse config\n self.parse_config()\n\n # initialize distributed training\n dist_dict = self.configure_systems()\n\n # initialize logger\n self.logger = logger(logger_type=self.logger_type)\n if self.is_rank_zero():\n self.logger.info(\"Initializing Multilevel Optimization...\\n\")\n start = time.time()\n\n # parse problem dependency\n self.parse_dependency()\n\n # set problem attributes\n for problem in self.problems:\n self.set_problem_attr(problem)\n\n # env initialization\n if self.env is not None:\n self.env.configure_distributed_training(dist_dict)\n self.env.configure_device(self.device)\n self.env.initialize()\n\n # problem initialization\n for problem in self.problems:\n problem.add_logger(self.logger)\n problem.configure_distributed_training(dist_dict)\n problem.configure_device(self.device)\n problem.configure_roll_back(self._roll_back)\n problem.initialize()\n if self.env is not None:\n problem.add_env(self.env)\n\n end = time.time()\n if self.is_rank_zero():\n self.logger.info(f\"Time spent on initialization: {end-start:.3f} (s)\\n\")\n\n def configure_systems(self):\n \"\"\"\n Configure basic systems set-up like distributed 
training and device placement.\n \"\"\"\n # configure distributed training\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n dist.init_process_group(backend=self._backend)\n\n self._world_size = dist.get_world_size()\n assert self._world_size > 1\n self._rank = dist.get_rank()\n\n device_count = torch.cuda.device_count()\n self._local_rank = self._rank % device_count\n\n dist_dict = {}\n dist_dict[\"strategy\"] = self._strategy\n dist_dict[\"backend\"] = self._backend\n dist_dict[\"world_size\"] = self._world_size\n dist_dict[\"rank\"] = self._rank\n dist_dict[\"local_rank\"] = self._local_rank\n\n # configure device for the current rank\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n torch.cuda.set_device(self._local_rank)\n self.device = torch.device(\"cuda\", self._local_rank)\n elif self._strategy == \"accelerate\":\n self.device = self.accelerator.device\n elif self._strategy == \"cpu\":\n self.device = \"cpu\"\n elif self._strategy == \"gpu\":\n self.device = \"cuda\"\n else:\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n return dist_dict\n\n def train(self):\n \"\"\"\n Set all problems in multilevel optimization to the train mode.\n \"\"\"\n for problem in self.problems:\n problem.train()\n\n def eval(self):\n \"\"\"\n Set all problems in multilevel optimization to the eval mode.\n \"\"\"\n for problem in self.problems:\n problem.eval()\n\n def check_leaf(self, problem):\n \"\"\"\n Check whether the given ``problem`` is a leaf problem or not.\n\n :param problem: Problem in multilevel optimization\n :type problem: Problem\n :return: True or False\n :rtype: bool\n \"\"\"\n for _, value_list in self.dependencies[\"l2u\"].items():\n if problem in set(value_list):\n return False\n\n return True\n\n def find_paths(self, src, dst):\n \"\"\"\n Find all paths from ``src`` to ``dst`` with a modified depth-first search algorithm.\n\n :param src: The end point of the upper-to-lower edge.\n :type src: Problem\n :param dst: The start point of the upper-to-lower edge.\n :type dst: Problem\n :return: List of all paths from ``src`` to ``dst``.\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/20", "ground_truth": " results = []\n path = [src]\n self.dfs(src, dst, path, results)\n assert len(results) > 0, f\"No path from {src.name} to {dst.name}!\"\n\n for i, _ in enumerate(results):\n results[i].reverse()\n results[i].append(dst)\n\n return results\n", "fpath_tuple": ["leopard-ai_betty", "betty", "engine.py"], "context_start_lineno": 0, "lineno": 239, "function_name": "find_paths"}, "groundtruth": " results = []\n path = [src]\n self.dfs(src, dst, path, results)\n assert len(results) > 0, f\"No path from {src.name} to {dst.name}!\"\n\n for i, _ in enumerate(results):\n results[i].reverse()\n results[i].append(dst)\n\n return results\n"} +{"prompt": "# Copyright Sang Keun Choe\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport time\n\nimport torch\nimport torch.distributed as dist\n\nfrom betty.configs import EngineConfig\nfrom betty.logging import logger\nfrom betty.misc.early_stopping import EarlyStopping\nfrom betty.utils import log_from_loss_dict\n\n\nclass Engine:\n \"\"\"\n ``Engine`` handles a dataflow graph based on the user-provided hierarchical problem\n dependencies. 
It also provides a primitive for executing multilevel optimization.\n \"\"\"\n\n def __init__(self, problems, config=None, dependencies=None, env=None):\n # config\n self.config = config if config is not None else EngineConfig()\n\n # step counters\n self.train_iters = 0\n self.valid_step = 0\n self.global_step = 0\n\n # logger\n self.logger_type = None\n self.logger = None\n\n # problem\n self.problems = problems\n self.leaves = []\n\n # dependencies\n self.dependencies = dependencies\n\n # env\n self.env = env\n\n # distributed\n self._strategy = None\n self._backend = None\n self._world_size = 0\n self._rank = 0\n self._local_rank = 0\n\n # early stopping\n self.early_stopping = None\n\n # roll back\n self._roll_back = False\n\n # device\n self.device = None\n\n # initialize\n self.initialize()\n\n def parse_config(self):\n \"\"\"\n Parse EngineConfig.\n \"\"\"\n self.train_iters = self.config.train_iters\n self.valid_step = self.config.valid_step\n\n self.logger_type = self.config.logger_type\n\n self._roll_back = self.config.roll_back\n\n self._strategy = self.config.strategy\n self._backend = self.config.backend\n\n if self.config.early_stopping:\n self.early_stopping = EarlyStopping(\n metric=self.config.early_stopping_metric,\n mode=self.config.early_stopping_mode,\n tolerance=self.config.early_stopping_tolerance,\n )\n\n def train_step(self):\n \"\"\"\n Running one-step gradient descent for all leaf problems.\n \"\"\"\n for leaf in self.leaves:\n leaf.step(global_step=self.global_step)\n\n def run(self):\n \"\"\"\n Execute multilevel optimization by running gradient descent for leaf problems.\n \"\"\"\n self.train()\n for it in range(1, self.train_iters + 1):\n self.global_step += 1\n self.train_step()\n\n if it % self.valid_step == 0 and self.do_validation():\n self.eval()\n validation_stats = self.validation() or {}\n log_loss = log_from_loss_dict(validation_stats)\n self.logger.info(\n f\"[Validation] [Global Step {self.global_step}] \" f\"{log_loss}\"\n )\n self.logger.log(\n validation_stats, tag=\"validation\", step=self.global_step\n )\n self.train()\n\n # early stopping\n if self.early_stopping is not None:\n stop = self.early_stopping(validation_stats)\n if stop:\n self.logger.info(\"Early stopping is executed!\")\n break\n\n def initialize(self):\n \"\"\"\n Initialize dependencies (computational graph) between problems.\n \"\"\"\n # Parse config\n self.parse_config()\n\n # initialize distributed training\n dist_dict = self.configure_systems()\n\n # initialize logger\n self.logger = logger(logger_type=self.logger_type)\n if self.is_rank_zero():\n self.logger.info(\"Initializing Multilevel Optimization...\\n\")\n start = time.time()\n\n # parse problem dependency\n self.parse_dependency()\n\n # set problem attributes\n for problem in self.problems:\n self.set_problem_attr(problem)\n\n # env initialization\n if self.env is not None:\n self.env.configure_distributed_training(dist_dict)\n self.env.configure_device(self.device)\n self.env.initialize()\n\n # problem initialization\n for problem in self.problems:\n problem.add_logger(self.logger)\n problem.configure_distributed_training(dist_dict)\n problem.configure_device(self.device)\n problem.configure_roll_back(self._roll_back)\n problem.initialize()\n if self.env is not None:\n problem.add_env(self.env)\n\n end = time.time()\n if self.is_rank_zero():\n self.logger.info(f\"Time spent on initialization: {end-start:.3f} (s)\\n\")\n\n def configure_systems(self):\n \"\"\"\n Configure basic systems set-up like distributed 
training and device placement.\n \"\"\"\n # configure distributed training\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n dist.init_process_group(backend=self._backend)\n\n self._world_size = dist.get_world_size()\n assert self._world_size > 1\n self._rank = dist.get_rank()\n\n device_count = torch.cuda.device_count()\n self._local_rank = self._rank % device_count\n\n dist_dict = {}\n dist_dict[\"strategy\"] = self._strategy\n dist_dict[\"backend\"] = self._backend\n dist_dict[\"world_size\"] = self._world_size\n dist_dict[\"rank\"] = self._rank\n dist_dict[\"local_rank\"] = self._local_rank\n\n # configure device for the current rank\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n torch.cuda.set_device(self._local_rank)\n self.device = torch.device(\"cuda\", self._local_rank)\n elif self._strategy == \"accelerate\":\n self.device = self.accelerator.device\n elif self._strategy == \"cpu\":\n self.device = \"cpu\"\n elif self._strategy == \"gpu\":\n self.device = \"cuda\"\n else:\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n return dist_dict\n\n def train(self):\n \"\"\"\n Set all problems in multilevel optimization to the train mode.\n \"\"\"\n for problem in self.problems:\n problem.train()\n\n def eval(self):\n \"\"\"\n Set all problems in multilevel optimization to the eval mode.\n \"\"\"\n for problem in self.problems:\n problem.eval()\n\n def check_leaf(self, problem):\n \"\"\"\n Check whether the given ``problem`` is a leaf problem or not.\n\n :param problem: Problem in multilevel optimization\n :type problem: Problem\n :return: True or False\n :rtype: bool\n \"\"\"\n for _, value_list in self.dependencies[\"l2u\"].items():\n if problem in set(value_list):\n return False\n\n return True\n\n def find_paths(self, src, dst):\n \"\"\"\n Find all paths from ``src`` to ``dst`` with a modified depth-first search algorithm.\n\n :param src: The end point of the upper-to-lower edge.\n :type src: Problem\n :param dst: The start point of the upper-to-lower edge.\n :type dst: Problem\n :return: List of all paths from ``src`` to ``dst``.\n \"\"\"\n results = []\n path = [src]\n self.dfs(src, dst, path, results)\n assert len(results) > 0, f\"No path from {src.name} to {dst.name}!\"\n\n for i, _ in enumerate(results):\n results[i].reverse()\n results[i].append(dst)\n\n return results\n\n def dfs(self, src, dst, path, results):", "metadata": {"task_id": "leopard-ai--betty/21", "ground_truth": " if src is dst:\n assert len(path) > 1\n result = [node for node in path]\n results.append(result)\n elif src not in self.dependencies[\"l2u\"]:\n return\n else:\n for adj in self.dependencies[\"l2u\"][src]:\n path.append(adj)\n self.dfs(adj, dst, path, results)\n path.pop()\n", "fpath_tuple": ["leopard-ai_betty", "betty", "engine.py"], "context_start_lineno": 0, "lineno": 251, "function_name": "dfs"}, "groundtruth": " if src is dst:\n assert len(path) > 1\n result = [node for node in path]\n results.append(result)\n elif src not in self.dependencies[\"l2u\"]:\n return\n else:\n for adj in self.dependencies[\"l2u\"][src]:\n path.append(adj)\n self.dfs(adj, dst, path, results)\n path.pop()\n"} +{"prompt": "# Copyright Sang Keun Choe\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport time\n\nimport torch\nimport torch.distributed as dist\n\nfrom betty.configs import EngineConfig\nfrom betty.logging import logger\nfrom betty.misc.early_stopping import EarlyStopping\nfrom 
betty.utils import log_from_loss_dict\n\n\nclass Engine:\n \"\"\"\n ``Engine`` handles a dataflow graph based on the user-provided hierarchical problem\n dependencies. It also provides a primitive for executing multilevel optimization.\n \"\"\"\n\n def __init__(self, problems, config=None, dependencies=None, env=None):\n # config\n self.config = config if config is not None else EngineConfig()\n\n # step counters\n self.train_iters = 0\n self.valid_step = 0\n self.global_step = 0\n\n # logger\n self.logger_type = None\n self.logger = None\n\n # problem\n self.problems = problems\n self.leaves = []\n\n # dependencies\n self.dependencies = dependencies\n\n # env\n self.env = env\n\n # distributed\n self._strategy = None\n self._backend = None\n self._world_size = 0\n self._rank = 0\n self._local_rank = 0\n\n # early stopping\n self.early_stopping = None\n\n # roll back\n self._roll_back = False\n\n # device\n self.device = None\n\n # initialize\n self.initialize()\n\n def parse_config(self):\n \"\"\"\n Parse EngineConfig.\n \"\"\"\n self.train_iters = self.config.train_iters\n self.valid_step = self.config.valid_step\n\n self.logger_type = self.config.logger_type\n\n self._roll_back = self.config.roll_back\n\n self._strategy = self.config.strategy\n self._backend = self.config.backend\n\n if self.config.early_stopping:\n self.early_stopping = EarlyStopping(\n metric=self.config.early_stopping_metric,\n mode=self.config.early_stopping_mode,\n tolerance=self.config.early_stopping_tolerance,\n )\n\n def train_step(self):\n \"\"\"\n Running one-step gradient descent for all leaf problems.\n \"\"\"\n for leaf in self.leaves:\n leaf.step(global_step=self.global_step)\n\n def run(self):\n \"\"\"\n Execute multilevel optimization by running gradient descent for leaf problems.\n \"\"\"\n self.train()\n for it in range(1, self.train_iters + 1):\n self.global_step += 1\n self.train_step()\n\n if it % self.valid_step == 0 and self.do_validation():\n self.eval()\n validation_stats = self.validation() or {}\n log_loss = log_from_loss_dict(validation_stats)\n self.logger.info(\n f\"[Validation] [Global Step {self.global_step}] \" f\"{log_loss}\"\n )\n self.logger.log(\n validation_stats, tag=\"validation\", step=self.global_step\n )\n self.train()\n\n # early stopping\n if self.early_stopping is not None:\n stop = self.early_stopping(validation_stats)\n if stop:\n self.logger.info(\"Early stopping is executed!\")\n break\n\n def initialize(self):\n \"\"\"\n Initialize dependencies (computational graph) between problems.\n \"\"\"\n # Parse config\n self.parse_config()\n\n # initialize distributed training\n dist_dict = self.configure_systems()\n\n # initialize logger\n self.logger = logger(logger_type=self.logger_type)\n if self.is_rank_zero():\n self.logger.info(\"Initializing Multilevel Optimization...\\n\")\n start = time.time()\n\n # parse problem dependency\n self.parse_dependency()\n\n # set problem attributes\n for problem in self.problems:\n self.set_problem_attr(problem)\n\n # env initialization\n if self.env is not None:\n self.env.configure_distributed_training(dist_dict)\n self.env.configure_device(self.device)\n self.env.initialize()\n\n # problem initialization\n for problem in self.problems:\n problem.add_logger(self.logger)\n problem.configure_distributed_training(dist_dict)\n problem.configure_device(self.device)\n problem.configure_roll_back(self._roll_back)\n problem.initialize()\n if self.env is not None:\n problem.add_env(self.env)\n\n end = time.time()\n if self.is_rank_zero():\n 
self.logger.info(f\"Time spent on initialization: {end-start:.3f} (s)\\n\")\n\n def configure_systems(self):\n \"\"\"\n Configure basic systems set-up like distributed training and device placement.\n \"\"\"\n # configure distributed training\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n dist.init_process_group(backend=self._backend)\n\n self._world_size = dist.get_world_size()\n assert self._world_size > 1\n self._rank = dist.get_rank()\n\n device_count = torch.cuda.device_count()\n self._local_rank = self._rank % device_count\n\n dist_dict = {}\n dist_dict[\"strategy\"] = self._strategy\n dist_dict[\"backend\"] = self._backend\n dist_dict[\"world_size\"] = self._world_size\n dist_dict[\"rank\"] = self._rank\n dist_dict[\"local_rank\"] = self._local_rank\n\n # configure device for the current rank\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n torch.cuda.set_device(self._local_rank)\n self.device = torch.device(\"cuda\", self._local_rank)\n elif self._strategy == \"accelerate\":\n self.device = self.accelerator.device\n elif self._strategy == \"cpu\":\n self.device = \"cpu\"\n elif self._strategy == \"gpu\":\n self.device = \"cuda\"\n else:\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n return dist_dict\n\n def train(self):\n \"\"\"\n Set all problems in multilevel optimization to the train mode.\n \"\"\"\n for problem in self.problems:\n problem.train()\n\n def eval(self):\n \"\"\"\n Set all problems in multilevel optimization to the eval mode.\n \"\"\"\n for problem in self.problems:\n problem.eval()\n\n def check_leaf(self, problem):\n \"\"\"\n Check whether the given ``problem`` is a leaf problem or not.\n\n :param problem: Problem in multilevel optimization\n :type problem: Problem\n :return: True or False\n :rtype: bool\n \"\"\"\n for _, value_list in self.dependencies[\"l2u\"].items():\n if problem in set(value_list):\n return False\n\n return True\n\n def find_paths(self, src, dst):\n \"\"\"\n Find all paths from ``src`` to ``dst`` with a modified depth-first search algorithm.\n\n :param src: The end point of the upper-to-lower edge.\n :type src: Problem\n :param dst: The start point of the upper-to-lower edge.\n :type dst: Problem\n :return: List of all paths from ``src`` to ``dst``.\n \"\"\"\n results = []\n path = [src]\n self.dfs(src, dst, path, results)\n assert len(results) > 0, f\"No path from {src.name} to {dst.name}!\"\n\n for i, _ in enumerate(results):\n results[i].reverse()\n results[i].append(dst)\n\n return results\n\n def dfs(self, src, dst, path, results):\n if src is dst:\n assert len(path) > 1\n result = [node for node in path]\n results.append(result)\n elif src not in self.dependencies[\"l2u\"]:\n return\n else:\n for adj in self.dependencies[\"l2u\"][src]:\n path.append(adj)\n self.dfs(adj, dst, path, results)\n path.pop()\n\n def parse_dependency(self):\n \"\"\"\n Parse user-provided ``u2l`` and ``l2u`` dependencies to figure out 1) topological order for\n multilevel optimization execution, and 2) backpropagation path(s) for each problem. 
A\n modified depth-first search algorithm is used.\n \"\"\"\n # Parse upper-to-lower dependency", "metadata": {"task_id": "leopard-ai--betty/22", "ground_truth": " for key, value_list in self.dependencies[\"u2l\"].items():\n for value in value_list:\n # find all paths from low to high for backpropagation\n paths = self.find_paths(src=value, dst=key)\n key.add_paths(paths)\n\n # Parse lower-to-upper dependency\n for key, value_list in self.dependencies[\"l2u\"].items():\n for value in value_list:\n # add value problem to parents of key problem for backpropgation\n key.add_parent(value)\n value.add_child(key)\n\n # Parse problems\n for problem in self.problems:\n if self.check_leaf(problem):\n problem.leaf = True\n self.leaves.append(problem)\n", "fpath_tuple": ["leopard-ai_betty", "betty", "engine.py"], "context_start_lineno": 0, "lineno": 270, "function_name": "parse_dependency"}, "groundtruth": " for key, value_list in self.dependencies[\"u2l\"].items():\n for value in value_list:\n # find all paths from low to high for backpropagation\n paths = self.find_paths(src=value, dst=key)\n key.add_paths(paths)\n\n # Parse lower-to-upper dependency\n for key, value_list in self.dependencies[\"l2u\"].items():\n for value in value_list:\n # add value problem to parents of key problem for backpropgation\n key.add_parent(value)\n value.add_child(key)\n\n # Parse problems\n for problem in self.problems:\n if self.check_leaf(problem):\n problem.leaf = True\n self.leaves.append(problem)\n"} +{"prompt": "# Copyright Sang Keun Choe\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport time\n\nimport torch\nimport torch.distributed as dist\n\nfrom betty.configs import EngineConfig\nfrom betty.logging import logger\nfrom betty.misc.early_stopping import EarlyStopping\nfrom betty.utils import log_from_loss_dict\n\n\nclass Engine:\n \"\"\"\n ``Engine`` handles a dataflow graph based on the user-provided hierarchical problem\n dependencies. 
It also provides a primitive for executing multilevel optimization.\n \"\"\"\n\n def __init__(self, problems, config=None, dependencies=None, env=None):\n # config\n self.config = config if config is not None else EngineConfig()\n\n # step counters\n self.train_iters = 0\n self.valid_step = 0\n self.global_step = 0\n\n # logger\n self.logger_type = None\n self.logger = None\n\n # problem\n self.problems = problems\n self.leaves = []\n\n # dependencies\n self.dependencies = dependencies\n\n # env\n self.env = env\n\n # distributed\n self._strategy = None\n self._backend = None\n self._world_size = 0\n self._rank = 0\n self._local_rank = 0\n\n # early stopping\n self.early_stopping = None\n\n # roll back\n self._roll_back = False\n\n # device\n self.device = None\n\n # initialize\n self.initialize()\n\n def parse_config(self):\n \"\"\"\n Parse EngineConfig.\n \"\"\"\n self.train_iters = self.config.train_iters\n self.valid_step = self.config.valid_step\n\n self.logger_type = self.config.logger_type\n\n self._roll_back = self.config.roll_back\n\n self._strategy = self.config.strategy\n self._backend = self.config.backend\n\n if self.config.early_stopping:\n self.early_stopping = EarlyStopping(\n metric=self.config.early_stopping_metric,\n mode=self.config.early_stopping_mode,\n tolerance=self.config.early_stopping_tolerance,\n )\n\n def train_step(self):\n \"\"\"\n Running one-step gradient descent for all leaf problems.\n \"\"\"\n for leaf in self.leaves:\n leaf.step(global_step=self.global_step)\n\n def run(self):\n \"\"\"\n Execute multilevel optimization by running gradient descent for leaf problems.\n \"\"\"\n self.train()\n for it in range(1, self.train_iters + 1):\n self.global_step += 1\n self.train_step()\n\n if it % self.valid_step == 0 and self.do_validation():\n self.eval()\n validation_stats = self.validation() or {}\n log_loss = log_from_loss_dict(validation_stats)\n self.logger.info(\n f\"[Validation] [Global Step {self.global_step}] \" f\"{log_loss}\"\n )\n self.logger.log(\n validation_stats, tag=\"validation\", step=self.global_step\n )\n self.train()\n\n # early stopping\n if self.early_stopping is not None:\n stop = self.early_stopping(validation_stats)\n if stop:\n self.logger.info(\"Early stopping is executed!\")\n break\n\n def initialize(self):\n \"\"\"\n Initialize dependencies (computational graph) between problems.\n \"\"\"\n # Parse config\n self.parse_config()\n\n # initialize distributed training\n dist_dict = self.configure_systems()\n\n # initialize logger\n self.logger = logger(logger_type=self.logger_type)\n if self.is_rank_zero():\n self.logger.info(\"Initializing Multilevel Optimization...\\n\")\n start = time.time()\n\n # parse problem dependency\n self.parse_dependency()\n\n # set problem attributes\n for problem in self.problems:\n self.set_problem_attr(problem)\n\n # env initialization\n if self.env is not None:\n self.env.configure_distributed_training(dist_dict)\n self.env.configure_device(self.device)\n self.env.initialize()\n\n # problem initialization\n for problem in self.problems:\n problem.add_logger(self.logger)\n problem.configure_distributed_training(dist_dict)\n problem.configure_device(self.device)\n problem.configure_roll_back(self._roll_back)\n problem.initialize()\n if self.env is not None:\n problem.add_env(self.env)\n\n end = time.time()\n if self.is_rank_zero():\n self.logger.info(f\"Time spent on initialization: {end-start:.3f} (s)\\n\")\n\n def configure_systems(self):\n \"\"\"\n Configure basic systems set-up like distributed 
training and device placement.\n \"\"\"\n # configure distributed training\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n dist.init_process_group(backend=self._backend)\n\n self._world_size = dist.get_world_size()\n assert self._world_size > 1\n self._rank = dist.get_rank()\n\n device_count = torch.cuda.device_count()\n self._local_rank = self._rank % device_count\n\n dist_dict = {}\n dist_dict[\"strategy\"] = self._strategy\n dist_dict[\"backend\"] = self._backend\n dist_dict[\"world_size\"] = self._world_size\n dist_dict[\"rank\"] = self._rank\n dist_dict[\"local_rank\"] = self._local_rank\n\n # configure device for the current rank\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n torch.cuda.set_device(self._local_rank)\n self.device = torch.device(\"cuda\", self._local_rank)\n elif self._strategy == \"accelerate\":\n self.device = self.accelerator.device\n elif self._strategy == \"cpu\":\n self.device = \"cpu\"\n elif self._strategy == \"gpu\":\n self.device = \"cuda\"\n else:\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n return dist_dict\n\n def train(self):\n \"\"\"\n Set all problems in multilevel optimization to the train mode.\n \"\"\"\n for problem in self.problems:\n problem.train()\n\n def eval(self):\n \"\"\"\n Set all problems in multilevel optimization to the eval mode.\n \"\"\"\n for problem in self.problems:\n problem.eval()\n\n def check_leaf(self, problem):\n \"\"\"\n Check whether the given ``problem`` is a leaf problem or not.\n\n :param problem: Problem in multilevel optimization\n :type problem: Problem\n :return: True or False\n :rtype: bool\n \"\"\"\n for _, value_list in self.dependencies[\"l2u\"].items():\n if problem in set(value_list):\n return False\n\n return True\n\n def find_paths(self, src, dst):\n \"\"\"\n Find all paths from ``src`` to ``dst`` with a modified depth-first search algorithm.\n\n :param src: The end point of the upper-to-lower edge.\n :type src: Problem\n :param dst: The start point of the upper-to-lower edge.\n :type dst: Problem\n :return: List of all paths from ``src`` to ``dst``.\n \"\"\"\n results = []\n path = [src]\n self.dfs(src, dst, path, results)\n assert len(results) > 0, f\"No path from {src.name} to {dst.name}!\"\n\n for i, _ in enumerate(results):\n results[i].reverse()\n results[i].append(dst)\n\n return results\n\n def dfs(self, src, dst, path, results):\n if src is dst:\n assert len(path) > 1\n result = [node for node in path]\n results.append(result)\n elif src not in self.dependencies[\"l2u\"]:\n return\n else:\n for adj in self.dependencies[\"l2u\"][src]:\n path.append(adj)\n self.dfs(adj, dst, path, results)\n path.pop()\n\n def parse_dependency(self):\n \"\"\"\n Parse user-provided ``u2l`` and ``l2u`` dependencies to figure out 1) topological order for\n multilevel optimization execution, and 2) backpropagation path(s) for each problem. 
A\n modified depth-first search algorithm is used.\n \"\"\"\n # Parse upper-to-lower dependency\n for key, value_list in self.dependencies[\"u2l\"].items():\n for value in value_list:\n # find all paths from low to high for backpropagation\n paths = self.find_paths(src=value, dst=key)\n key.add_paths(paths)\n\n # Parse lower-to-upper dependency\n for key, value_list in self.dependencies[\"l2u\"].items():\n for value in value_list:\n # add value problem to parents of key problem for backpropgation\n key.add_parent(value)\n value.add_child(key)\n\n # Parse problems\n for problem in self.problems:\n if self.check_leaf(problem):\n problem.leaf = True\n self.leaves.append(problem)\n\n def set_dependency(self, dependencies):\n self.dependencies = dependencies\n self.leaves = []\n\n # clear existing dependencies\n for problem in self.problems:\n problem.leaf = False\n problem.clear_dependencies()\n\n self.parse_dependency()\n\n def set_problem_attr(self, problem):\n \"\"\"\n Set class attribute for the given ``problem`` based on their names\n\n :param problem: Problem in multilevel optimization\n :type problem: Problem\n :return: ``problem`` name\n :rtype: str\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/23", "ground_truth": " name = problem.name\n\n # set attribute for Engine\n assert not hasattr(self, name), f\"Problem already has a problelm named {name}!\"\n setattr(self, name, problem)\n\n # set attribute for Problems\n for prob in self.problems:\n if prob != problem:\n assert not hasattr(problem, name)\n setattr(prob, name, problem)\n\n # set attribute for Env\n if self.env is not None:\n setattr(self.env, name, problem)\n\n return name\n", "fpath_tuple": ["leopard-ai_betty", "betty", "engine.py"], "context_start_lineno": 0, "lineno": 309, "function_name": "set_problem_attr"}, "groundtruth": " name = problem.name\n\n # set attribute for Engine\n assert not hasattr(self, name), f\"Problem already has a problelm named {name}!\"\n setattr(self, name, problem)\n\n # set attribute for Problems\n for prob in self.problems:\n if prob != problem:\n assert not hasattr(problem, name)\n setattr(prob, name, problem)\n\n # set attribute for Env\n if self.env is not None:\n setattr(self.env, name, problem)\n\n return name\n"} +{"prompt": "# Copyright Sang Keun Choe\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport time\n\nimport torch\nimport torch.distributed as dist\n\nfrom betty.configs import EngineConfig\nfrom betty.logging import logger\nfrom betty.misc.early_stopping import EarlyStopping\nfrom betty.utils import log_from_loss_dict\n\n\nclass Engine:\n \"\"\"\n ``Engine`` handles a dataflow graph based on the user-provided hierarchical problem\n dependencies. 
It also provides a primitive for executing multilevel optimization.\n \"\"\"\n\n def __init__(self, problems, config=None, dependencies=None, env=None):\n # config\n self.config = config if config is not None else EngineConfig()\n\n # step counters\n self.train_iters = 0\n self.valid_step = 0\n self.global_step = 0\n\n # logger\n self.logger_type = None\n self.logger = None\n\n # problem\n self.problems = problems\n self.leaves = []\n\n # dependencies\n self.dependencies = dependencies\n\n # env\n self.env = env\n\n # distributed\n self._strategy = None\n self._backend = None\n self._world_size = 0\n self._rank = 0\n self._local_rank = 0\n\n # early stopping\n self.early_stopping = None\n\n # roll back\n self._roll_back = False\n\n # device\n self.device = None\n\n # initialize\n self.initialize()\n\n def parse_config(self):\n \"\"\"\n Parse EngineConfig.\n \"\"\"\n self.train_iters = self.config.train_iters\n self.valid_step = self.config.valid_step\n\n self.logger_type = self.config.logger_type\n\n self._roll_back = self.config.roll_back\n\n self._strategy = self.config.strategy\n self._backend = self.config.backend\n\n if self.config.early_stopping:\n self.early_stopping = EarlyStopping(\n metric=self.config.early_stopping_metric,\n mode=self.config.early_stopping_mode,\n tolerance=self.config.early_stopping_tolerance,\n )\n\n def train_step(self):\n \"\"\"\n Running one-step gradient descent for all leaf problems.\n \"\"\"\n for leaf in self.leaves:\n leaf.step(global_step=self.global_step)\n\n def run(self):\n \"\"\"\n Execute multilevel optimization by running gradient descent for leaf problems.\n \"\"\"\n self.train()\n for it in range(1, self.train_iters + 1):\n self.global_step += 1\n self.train_step()\n\n if it % self.valid_step == 0 and self.do_validation():\n self.eval()\n validation_stats = self.validation() or {}\n log_loss = log_from_loss_dict(validation_stats)\n self.logger.info(\n f\"[Validation] [Global Step {self.global_step}] \" f\"{log_loss}\"\n )\n self.logger.log(\n validation_stats, tag=\"validation\", step=self.global_step\n )\n self.train()\n\n # early stopping\n if self.early_stopping is not None:\n stop = self.early_stopping(validation_stats)\n if stop:\n self.logger.info(\"Early stopping is executed!\")\n break\n\n def initialize(self):\n \"\"\"\n Initialize dependencies (computational graph) between problems.\n \"\"\"\n # Parse config\n self.parse_config()\n\n # initialize distributed training\n dist_dict = self.configure_systems()\n\n # initialize logger\n self.logger = logger(logger_type=self.logger_type)\n if self.is_rank_zero():\n self.logger.info(\"Initializing Multilevel Optimization...\\n\")\n start = time.time()\n\n # parse problem dependency\n self.parse_dependency()\n\n # set problem attributes\n for problem in self.problems:\n self.set_problem_attr(problem)\n\n # env initialization\n if self.env is not None:\n self.env.configure_distributed_training(dist_dict)\n self.env.configure_device(self.device)\n self.env.initialize()\n\n # problem initialization\n for problem in self.problems:\n problem.add_logger(self.logger)\n problem.configure_distributed_training(dist_dict)\n problem.configure_device(self.device)\n problem.configure_roll_back(self._roll_back)\n problem.initialize()\n if self.env is not None:\n problem.add_env(self.env)\n\n end = time.time()\n if self.is_rank_zero():\n self.logger.info(f\"Time spent on initialization: {end-start:.3f} (s)\\n\")\n\n def configure_systems(self):\n \"\"\"\n Configure basic systems set-up like distributed 
training and device placement.\n \"\"\"\n # configure distributed training\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n dist.init_process_group(backend=self._backend)\n\n self._world_size = dist.get_world_size()\n assert self._world_size > 1\n self._rank = dist.get_rank()\n\n device_count = torch.cuda.device_count()\n self._local_rank = self._rank % device_count\n\n dist_dict = {}\n dist_dict[\"strategy\"] = self._strategy\n dist_dict[\"backend\"] = self._backend\n dist_dict[\"world_size\"] = self._world_size\n dist_dict[\"rank\"] = self._rank\n dist_dict[\"local_rank\"] = self._local_rank\n\n # configure device for the current rank\n if self._strategy in [\"distributed\", \"zero\", \"fsdp\"]:\n torch.cuda.set_device(self._local_rank)\n self.device = torch.device(\"cuda\", self._local_rank)\n elif self._strategy == \"accelerate\":\n self.device = self.accelerator.device\n elif self._strategy == \"cpu\":\n self.device = \"cpu\"\n elif self._strategy == \"gpu\":\n self.device = \"cuda\"\n else:\n self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n return dist_dict\n\n def train(self):\n \"\"\"\n Set all problems in multilevel optimization to the train mode.\n \"\"\"\n for problem in self.problems:\n problem.train()\n\n def eval(self):\n \"\"\"\n Set all problems in multilevel optimization to the eval mode.\n \"\"\"\n for problem in self.problems:\n problem.eval()\n\n def check_leaf(self, problem):\n \"\"\"\n Check whether the given ``problem`` is a leaf problem or not.\n\n :param problem: Problem in multilevel optimization\n :type problem: Problem\n :return: True or False\n :rtype: bool\n \"\"\"\n for _, value_list in self.dependencies[\"l2u\"].items():\n if problem in set(value_list):\n return False\n\n return True\n\n def find_paths(self, src, dst):\n \"\"\"\n Find all paths from ``src`` to ``dst`` with a modified depth-first search algorithm.\n\n :param src: The end point of the upper-to-lower edge.\n :type src: Problem\n :param dst: The start point of the upper-to-lower edge.\n :type dst: Problem\n :return: List of all paths from ``src`` to ``dst``.\n \"\"\"\n results = []\n path = [src]\n self.dfs(src, dst, path, results)\n assert len(results) > 0, f\"No path from {src.name} to {dst.name}!\"\n\n for i, _ in enumerate(results):\n results[i].reverse()\n results[i].append(dst)\n\n return results\n\n def dfs(self, src, dst, path, results):\n if src is dst:\n assert len(path) > 1\n result = [node for node in path]\n results.append(result)\n elif src not in self.dependencies[\"l2u\"]:\n return\n else:\n for adj in self.dependencies[\"l2u\"][src]:\n path.append(adj)\n self.dfs(adj, dst, path, results)\n path.pop()\n\n def parse_dependency(self):\n \"\"\"\n Parse user-provided ``u2l`` and ``l2u`` dependencies to figure out 1) topological order for\n multilevel optimization execution, and 2) backpropagation path(s) for each problem. 
A\n modified depth-first search algorithm is used.\n \"\"\"\n # Parse upper-to-lower dependency\n for key, value_list in self.dependencies[\"u2l\"].items():\n for value in value_list:\n # find all paths from low to high for backpropagation\n paths = self.find_paths(src=value, dst=key)\n key.add_paths(paths)\n\n # Parse lower-to-upper dependency\n for key, value_list in self.dependencies[\"l2u\"].items():\n for value in value_list:\n # add value problem to parents of key problem for backpropgation\n key.add_parent(value)\n value.add_child(key)\n\n # Parse problems\n for problem in self.problems:\n if self.check_leaf(problem):\n problem.leaf = True\n self.leaves.append(problem)\n\n def set_dependency(self, dependencies):\n self.dependencies = dependencies\n self.leaves = []\n\n # clear existing dependencies\n for problem in self.problems:\n problem.leaf = False\n problem.clear_dependencies()\n\n self.parse_dependency()\n\n def set_problem_attr(self, problem):\n \"\"\"\n Set class attribute for the given ``problem`` based on their names\n\n :param problem: Problem in multilevel optimization\n :type problem: Problem\n :return: ``problem`` name\n :rtype: str\n \"\"\"\n name = problem.name\n\n # set attribute for Engine\n assert not hasattr(self, name), f\"Problem already has a problelm named {name}!\"\n setattr(self, name, problem)\n\n # set attribute for Problems\n for prob in self.problems:\n if prob != problem:\n assert not hasattr(problem, name)\n setattr(prob, name, problem)\n\n # set attribute for Env\n if self.env is not None:\n setattr(self.env, name, problem)\n\n return name\n\n def do_validation(self):\n \"\"\"\n Check whether to run validation.\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/24", "ground_truth": " if self.is_implemented(\"validation\") and self.is_rank_zero():\n return True\n return False\n", "fpath_tuple": ["leopard-ai_betty", "betty", "engine.py"], "context_start_lineno": 0, "lineno": 331, "function_name": "do_validation"}, "groundtruth": " if self.is_implemented(\"validation\") and self.is_rank_zero():\n return True\n return False\n"} +{"prompt": "from torch.distributed.optim import ZeroRedundancyOptimizer\n\n\ndef patch_optimizer(optimizer, params, is_zero):", "metadata": {"task_id": "leopard-ai--betty/25", "ground_truth": " defaults = optimizer.defaults\n new_optimizer = None\n if is_zero:\n new_optimizer = ZeroRedundancyOptimizer(\n params=params,\n optimizer_class=optimizer.__class__,\n parameters_as_bucket_view=True,\n **defaults,\n )\n else:\n new_optimizer = optimizer.__class__(params, **defaults)\n\n return new_optimizer\n", "fpath_tuple": ["leopard-ai_betty", "betty", "patch", "optimizer.py"], "context_start_lineno": 0, "lineno": 4, "function_name": "patch_optimizer"}, "groundtruth": " defaults = optimizer.defaults\n new_optimizer = None\n if is_zero:\n new_optimizer = ZeroRedundancyOptimizer(\n params=params,\n optimizer_class=optimizer.__class__,\n parameters_as_bucket_view=True,\n **defaults,\n )\n else:\n new_optimizer = optimizer.__class__(params, **defaults)\n\n return new_optimizer\n"} +{"prompt": "import warnings\n\nimport torch\n\nfrom betty.utils import neg_with_none\n\n\ndef neumann(vector, curr, prev, sync):\n \"\"\"\n Approximate the matrix-vector multiplication with the best response Jacobian by the\n Neumann Series as proposed in\n `Optimizing Millions of Hyperparameters by Implicit Differentiation\n `_ based on implicit function theorem (IFT). 
Users may\n specify learning rate (``neumann_alpha``) and unrolling steps (``neumann_iterations``) in\n ``Config``.\n\n :param vector:\n Vector with which matrix-vector multiplication with best-response Jacobian (matrix) would\n be performed.\n :type vector: Sequence of Tensor\n :param curr: A current level problem\n :type curr: Problem\n :param prev: A directly lower-level problem to the current problem\n :type prev: Problem\n :return: (Intermediate) gradient\n :rtype: Sequence of Tensor\n \"\"\"\n # ! Mabye replace with child.loss by adding self.loss attribute to save computation", "metadata": {"task_id": "leopard-ai--betty/26", "ground_truth": " assert len(curr.paths) == 0, \"neumann method is not supported for higher order MLO!\"\n config = curr.config\n in_loss = curr.training_step_exec(curr.cur_batch)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n in_grad = torch.autograd.grad(\n in_loss, curr.trainable_parameters(), create_graph=True\n )\n v2 = approx_inverse_hvp(\n vector,\n in_grad,\n curr.trainable_parameters(),\n iterations=config.neumann_iterations,\n alpha=config.neumann_alpha,\n )\n if sync:\n v2 = [neg_with_none(x) for x in v2]\n torch.autograd.backward(\n in_grad, inputs=prev.trainable_parameters(), grad_tensors=v2\n )\n implicit_grad = None\n else:\n implicit_grad = torch.autograd.grad(\n in_grad, prev.trainable_parameters(), grad_outputs=v2\n )\n implicit_grad = [neg_with_none(ig) for ig in implicit_grad]\n\n return implicit_grad\n", "fpath_tuple": ["leopard-ai_betty", "betty", "hypergradient", "neumann.py"], "context_start_lineno": 0, "lineno": 28, "function_name": "neumann"}, "groundtruth": " assert len(curr.paths) == 0, \"neumann method is not supported for higher order MLO!\"\n config = curr.config\n in_loss = curr.training_step_exec(curr.cur_batch)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n in_grad = torch.autograd.grad(\n in_loss, curr.trainable_parameters(), create_graph=True\n )\n v2 = approx_inverse_hvp(\n vector,\n in_grad,\n curr.trainable_parameters(),\n iterations=config.neumann_iterations,\n alpha=config.neumann_alpha,\n )\n if sync:\n v2 = [neg_with_none(x) for x in v2]\n torch.autograd.backward(\n in_grad, inputs=prev.trainable_parameters(), grad_tensors=v2\n )\n implicit_grad = None\n else:\n implicit_grad = torch.autograd.grad(\n in_grad, prev.trainable_parameters(), grad_outputs=v2\n )\n implicit_grad = [neg_with_none(ig) for ig in implicit_grad]\n\n return implicit_grad\n"} +{"prompt": "import warnings\n\nimport torch\n\nfrom betty.utils import neg_with_none\n\n\ndef neumann(vector, curr, prev, sync):\n \"\"\"\n Approximate the matrix-vector multiplication with the best response Jacobian by the\n Neumann Series as proposed in\n `Optimizing Millions of Hyperparameters by Implicit Differentiation\n `_ based on implicit function theorem (IFT). Users may\n specify learning rate (``neumann_alpha``) and unrolling steps (``neumann_iterations``) in\n ``Config``.\n\n :param vector:\n Vector with which matrix-vector multiplication with best-response Jacobian (matrix) would\n be performed.\n :type vector: Sequence of Tensor\n :param curr: A current level problem\n :type curr: Problem\n :param prev: A directly lower-level problem to the current problem\n :type prev: Problem\n :return: (Intermediate) gradient\n :rtype: Sequence of Tensor\n \"\"\"\n # ! 
Mabye replace with child.loss by adding self.loss attribute to save computation\n assert len(curr.paths) == 0, \"neumann method is not supported for higher order MLO!\"\n config = curr.config\n in_loss = curr.training_step_exec(curr.cur_batch)\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n in_grad = torch.autograd.grad(\n in_loss, curr.trainable_parameters(), create_graph=True\n )\n v2 = approx_inverse_hvp(\n vector,\n in_grad,\n curr.trainable_parameters(),\n iterations=config.neumann_iterations,\n alpha=config.neumann_alpha,\n )\n if sync:\n v2 = [neg_with_none(x) for x in v2]\n torch.autograd.backward(\n in_grad, inputs=prev.trainable_parameters(), grad_tensors=v2\n )\n implicit_grad = None\n else:\n implicit_grad = torch.autograd.grad(\n in_grad, prev.trainable_parameters(), grad_outputs=v2\n )\n implicit_grad = [neg_with_none(ig) for ig in implicit_grad]\n\n return implicit_grad\n\n\ndef approx_inverse_hvp(v, f, params, iterations=3, alpha=1.0):", "metadata": {"task_id": "leopard-ai--betty/27", "ground_truth": " p = v\n for _ in range(iterations):\n hvp = torch.autograd.grad(f, params, grad_outputs=v, retain_graph=True)\n v = [v_i - alpha * hvp_i for v_i, hvp_i in zip(v, hvp)]\n p = [v_i + p_i for v_i, p_i in zip(v, p)]\n\n return [alpha * p_i for p_i in p]\n", "fpath_tuple": ["leopard-ai_betty", "betty", "hypergradient", "neumann.py"], "context_start_lineno": 0, "lineno": 59, "function_name": "approx_inverse_hvp"}, "groundtruth": " p = v\n for _ in range(iterations):\n hvp = torch.autograd.grad(f, params, grad_outputs=v, retain_graph=True)\n v = [v_i - alpha * hvp_i for v_i, hvp_i in zip(v, hvp)]\n p = [v_i + p_i for v_i, p_i in zip(v, p)]\n\n return [alpha * p_i for p_i in p]\n"} +{"prompt": "import math\nimport torch\n\n\ndef get_optimzer_type(optimizer):", "metadata": {"task_id": "leopard-ai--betty/28", "ground_truth": " cls_name = type(optimizer).__name__.lower()\n if \"adam\" in cls_name:\n return \"adam\"\n return \"sgd\"\n", "fpath_tuple": ["leopard-ai_betty", "betty", "hypergradient", "utils.py"], "context_start_lineno": 0, "lineno": 5, "function_name": "get_optimzer_type"}, "groundtruth": " cls_name = type(optimizer).__name__.lower()\n if \"adam\" in cls_name:\n return \"adam\"\n return \"sgd\"\n"} +{"prompt": "import torch\n\nfrom betty.utils import replace_none_with_zero\n\nfrom .darts import darts\nfrom .cg import cg\nfrom .neumann import neumann\nfrom .reinforce import reinforce\n\n\njvp_fn_mapping = {\n \"darts\": darts,\n \"neumann\": neumann,\n \"cg\": cg,\n \"reinforce\": reinforce,\n}\n\n\ndef get_grads(loss, path, retain_graph, do_sync):", "metadata": {"task_id": "leopard-ai--betty/29", "ground_truth": " jvp = torch.autograd.grad(\n loss,\n path[1].trainable_parameters(),\n retain_graph=retain_graph,\n allow_unused=True,\n )\n jvp = replace_none_with_zero(jvp, path[1].trainable_parameters())\n for i in range(1, len(path) - 1):\n jvp_fn_type = path[i].config.type\n assert jvp_fn_type in jvp_fn_mapping\n jvp_fn = jvp_fn_mapping[jvp_fn_type]\n sync = bool(do_sync and i == len(path) - 2)\n jvp = jvp_fn(jvp, path[i], path[i + 1], sync)\n\n return jvp\n", "fpath_tuple": ["leopard-ai_betty", "betty", "hypergradient", "__init__.py"], "context_start_lineno": 0, "lineno": 19, "function_name": "get_grads"}, "groundtruth": " jvp = torch.autograd.grad(\n loss,\n path[1].trainable_parameters(),\n retain_graph=retain_graph,\n allow_unused=True,\n )\n jvp = replace_none_with_zero(jvp, path[1].trainable_parameters())\n for i in range(1, len(path) - 1):\n 
jvp_fn_type = path[i].config.type\n assert jvp_fn_type in jvp_fn_mapping\n jvp_fn = jvp_fn_mapping[jvp_fn_type]\n sync = bool(do_sync and i == len(path) - 2)\n jvp = jvp_fn(jvp, path[i], path[i + 1], sync)\n\n return jvp\n"} +{"prompt": "import numpy as np\n\nimport torch\nimport torch.nn.functional as F\n\nfrom betty.engine import Engine\nfrom betty.configs import Config, EngineConfig\nfrom betty.problems import ImplicitProblem\n\n# hyperparameters\nDATA_NUM = 1000\nDATA_DIM = 20\n\n# data preparation\nw_gt = np.random.randn(DATA_DIM)\nx = np.random.randn(DATA_NUM, DATA_DIM)\ny = x @ w_gt + 0.1 * np.random.randn(DATA_NUM)\ny = (y > 0).astype(float)\n\nidx = DATA_NUM // 2\nx_train, x_val, y_train, y_val = x[:idx, :], x[idx:, :], y[:idx], y[idx:]\nx_train, y_train = (\n torch.from_numpy(x_train).float(),\n torch.from_numpy(y_train).float(),\n)\nx_val, y_val = (\n torch.from_numpy(x_val).float(),\n torch.from_numpy(y_val).float(),\n)\n\n\ndef make_data_loader(xs, ys):", "metadata": {"task_id": "leopard-ai--betty/30", "ground_truth": " datasets = [(xs, ys)]\n\n return datasets\n", "fpath_tuple": ["leopard-ai_betty", "betty", "test_install.py"], "context_start_lineno": 0, "lineno": 32, "function_name": "make_data_loader"}, "groundtruth": " datasets = [(xs, ys)]\n\n return datasets\n"} +{"prompt": "import numpy as np\n\nimport torch\nimport torch.nn.functional as F\n\nfrom betty.engine import Engine\nfrom betty.configs import Config, EngineConfig\nfrom betty.problems import ImplicitProblem\n\n# hyperparameters\nDATA_NUM = 1000\nDATA_DIM = 20\n\n# data preparation\nw_gt = np.random.randn(DATA_DIM)\nx = np.random.randn(DATA_NUM, DATA_DIM)\ny = x @ w_gt + 0.1 * np.random.randn(DATA_NUM)\ny = (y > 0).astype(float)\n\nidx = DATA_NUM // 2\nx_train, x_val, y_train, y_val = x[:idx, :], x[idx:, :], y[:idx], y[idx:]\nx_train, y_train = (\n torch.from_numpy(x_train).float(),\n torch.from_numpy(y_train).float(),\n)\nx_val, y_val = (\n torch.from_numpy(x_val).float(),\n torch.from_numpy(y_val).float(),\n)\n\n\ndef make_data_loader(xs, ys):\n datasets = [(xs, ys)]\n\n return datasets\n\n\nclass ChildNet(torch.nn.Module):\n def __init__(self) -> None:", "metadata": {"task_id": "leopard-ai--betty/31", "ground_truth": " super().__init__()\n\n self.w = torch.nn.Parameter(torch.zeros(DATA_DIM))\n", "fpath_tuple": ["leopard-ai_betty", "betty", "test_install.py"], "context_start_lineno": 0, "lineno": 39, "function_name": "__init__"}, "groundtruth": " super().__init__()\n\n self.w = torch.nn.Parameter(torch.zeros(DATA_DIM))\n"} +{"prompt": "import numpy as np\n\nimport torch\nimport torch.nn.functional as F\n\nfrom betty.engine import Engine\nfrom betty.configs import Config, EngineConfig\nfrom betty.problems import ImplicitProblem\n\n# hyperparameters\nDATA_NUM = 1000\nDATA_DIM = 20\n\n# data preparation\nw_gt = np.random.randn(DATA_DIM)\nx = np.random.randn(DATA_NUM, DATA_DIM)\ny = x @ w_gt + 0.1 * np.random.randn(DATA_NUM)\ny = (y > 0).astype(float)\n\nidx = DATA_NUM // 2\nx_train, x_val, y_train, y_val = x[:idx, :], x[idx:, :], y[:idx], y[idx:]\nx_train, y_train = (\n torch.from_numpy(x_train).float(),\n torch.from_numpy(y_train).float(),\n)\nx_val, y_val = (\n torch.from_numpy(x_val).float(),\n torch.from_numpy(y_val).float(),\n)\n\n\ndef make_data_loader(xs, ys):\n datasets = [(xs, ys)]\n\n return datasets\n\n\nclass ChildNet(torch.nn.Module):\n def __init__(self) -> None:\n super().__init__()\n\n self.w = torch.nn.Parameter(torch.zeros(DATA_DIM))\n\n def forward(self, inputs):\n outs = inputs @ self.w\n 
return outs, self.w\n\n\nclass ParentNet(torch.nn.Module):\n def __init__(self) -> None:", "metadata": {"task_id": "leopard-ai--betty/32", "ground_truth": " super().__init__()\n\n self.w = torch.nn.Parameter(torch.ones(DATA_DIM))\n", "fpath_tuple": ["leopard-ai_betty", "betty", "test_install.py"], "context_start_lineno": 0, "lineno": 50, "function_name": "__init__"}, "groundtruth": " super().__init__()\n\n self.w = torch.nn.Parameter(torch.ones(DATA_DIM))\n"} +{"prompt": "import numpy as np\n\nimport torch\nimport torch.nn.functional as F\n\nfrom betty.engine import Engine\nfrom betty.configs import Config, EngineConfig\nfrom betty.problems import ImplicitProblem\n\n# hyperparameters\nDATA_NUM = 1000\nDATA_DIM = 20\n\n# data preparation\nw_gt = np.random.randn(DATA_DIM)\nx = np.random.randn(DATA_NUM, DATA_DIM)\ny = x @ w_gt + 0.1 * np.random.randn(DATA_NUM)\ny = (y > 0).astype(float)\n\nidx = DATA_NUM // 2\nx_train, x_val, y_train, y_val = x[:idx, :], x[idx:, :], y[:idx], y[idx:]\nx_train, y_train = (\n torch.from_numpy(x_train).float(),\n torch.from_numpy(y_train).float(),\n)\nx_val, y_val = (\n torch.from_numpy(x_val).float(),\n torch.from_numpy(y_val).float(),\n)\n\n\ndef make_data_loader(xs, ys):\n datasets = [(xs, ys)]\n\n return datasets\n\n\nclass ChildNet(torch.nn.Module):\n def __init__(self) -> None:\n super().__init__()\n\n self.w = torch.nn.Parameter(torch.zeros(DATA_DIM))\n\n def forward(self, inputs):\n outs = inputs @ self.w\n return outs, self.w\n\n\nclass ParentNet(torch.nn.Module):\n def __init__(self) -> None:\n super().__init__()\n\n self.w = torch.nn.Parameter(torch.ones(DATA_DIM))\n\n def forward(self):\n return self.w\n\n\nclass Parent(ImplicitProblem):\n def training_step(self, batch):", "metadata": {"task_id": "leopard-ai--betty/33", "ground_truth": " inputs, targets = batch\n outs = self.inner(inputs)[0]\n loss = F.binary_cross_entropy_with_logits(outs, targets)\n return loss\n", "fpath_tuple": ["leopard-ai_betty", "betty", "test_install.py"], "context_start_lineno": 0, "lineno": 60, "function_name": "training_step"}, "groundtruth": " inputs, targets = batch\n outs = self.inner(inputs)[0]\n loss = F.binary_cross_entropy_with_logits(outs, targets)\n return loss\n"} +{"prompt": "import numpy as np\n\nimport torch\nimport torch.nn.functional as F\n\nfrom betty.engine import Engine\nfrom betty.configs import Config, EngineConfig\nfrom betty.problems import ImplicitProblem\n\n# hyperparameters\nDATA_NUM = 1000\nDATA_DIM = 20\n\n# data preparation\nw_gt = np.random.randn(DATA_DIM)\nx = np.random.randn(DATA_NUM, DATA_DIM)\ny = x @ w_gt + 0.1 * np.random.randn(DATA_NUM)\ny = (y > 0).astype(float)\n\nidx = DATA_NUM // 2\nx_train, x_val, y_train, y_val = x[:idx, :], x[idx:, :], y[:idx], y[idx:]\nx_train, y_train = (\n torch.from_numpy(x_train).float(),\n torch.from_numpy(y_train).float(),\n)\nx_val, y_val = (\n torch.from_numpy(x_val).float(),\n torch.from_numpy(y_val).float(),\n)\n\n\ndef make_data_loader(xs, ys):\n datasets = [(xs, ys)]\n\n return datasets\n\n\nclass ChildNet(torch.nn.Module):\n def __init__(self) -> None:\n super().__init__()\n\n self.w = torch.nn.Parameter(torch.zeros(DATA_DIM))\n\n def forward(self, inputs):\n outs = inputs @ self.w\n return outs, self.w\n\n\nclass ParentNet(torch.nn.Module):\n def __init__(self) -> None:\n super().__init__()\n\n self.w = torch.nn.Parameter(torch.ones(DATA_DIM))\n\n def forward(self):\n return self.w\n\n\nclass Parent(ImplicitProblem):\n def training_step(self, batch):\n inputs, targets = batch\n outs = 
self.inner(inputs)[0]\n loss = F.binary_cross_entropy_with_logits(outs, targets)\n return loss\n\n def configure_train_data_loader(self):\n return make_data_loader(x_val, y_val)\n\n def configure_module(self):\n return ParentNet()\n\n def configure_optimizer(self):\n return torch.optim.SGD(self.module.parameters(), lr=1, momentum=0.9)\n\n def param_callback(self):\n for p in self.trainable_parameters():\n p.data.clamp_(min=1e-8)\n\n\nclass Child(ImplicitProblem):\n def training_step(self, batch):", "metadata": {"task_id": "leopard-ai--betty/34", "ground_truth": " inputs, targets = batch\n outs, params = self.module(inputs)\n loss = (\n F.binary_cross_entropy_with_logits(outs, targets)\n + 0.5\n * (\n params.unsqueeze(0) @ torch.diag(self.outer()) @ params.unsqueeze(1)\n ).sum()\n )\n return loss\n", "fpath_tuple": ["leopard-ai_betty", "betty", "test_install.py"], "context_start_lineno": 0, "lineno": 81, "function_name": "training_step"}, "groundtruth": " inputs, targets = batch\n outs, params = self.module(inputs)\n loss = (\n F.binary_cross_entropy_with_logits(outs, targets)\n + 0.5\n * (\n params.unsqueeze(0) @ torch.diag(self.outer()) @ params.unsqueeze(1)\n ).sum()\n )\n return loss\n"} +{"prompt": "import sys\nimport abc\nimport logging\n\n\n_logger = None\n\n\ndef get_logger():\n \"\"\"\n Get global logger.\n \"\"\"", "metadata": {"task_id": "leopard-ai--betty/35", "ground_truth": " global _logger\n if _logger:\n return _logger\n logger = logging.getLogger(\"betty\")\n log_format = logging.Formatter(\n \"[%(asctime)s] [%(levelname)s] %(message)s\", \"%Y-%m-%d %H:%M:%S\"\n )\n\n logger.propagate = False\n logger.setLevel(logging.INFO)\n ch = logging.StreamHandler(stream=sys.stdout)\n ch.setLevel(logging.INFO)\n ch.setFormatter(log_format)\n logger.addHandler(ch)\n\n _logger = logger\n return _logger\n", "fpath_tuple": ["leopard-ai_betty", "betty", "logging", "logger_base.py"], "context_start_lineno": 0, "lineno": 12, "function_name": "get_logger"}, "groundtruth": " global _logger\n if _logger:\n return _logger\n logger = logging.getLogger(\"betty\")\n log_format = logging.Formatter(\n \"[%(asctime)s] [%(levelname)s] %(message)s\", \"%Y-%m-%d %H:%M:%S\"\n )\n\n logger.propagate = False\n logger.setLevel(logging.INFO)\n ch = logging.StreamHandler(stream=sys.stdout)\n ch.setLevel(logging.INFO)\n ch.setFormatter(log_format)\n logger.addHandler(ch)\n\n _logger = logger\n return _logger\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Plotting functions pre and post model fitting.\"\"\"\n\nimport functools\nimport logging\n\n# Using these types from typing instead of their generic types in the type hints\n# in order to be compatible with Python 3.7 and 3.8.\nfrom typing import Any, List, Optional, Sequence, Tuple\n\nimport arviz\nimport jax\nimport jax.numpy as jnp\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport numpyro\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn import metrics\n\nfrom 
lightweight_mmm import lightweight_mmm\nfrom lightweight_mmm import models\nfrom lightweight_mmm import preprocessing\nfrom lightweight_mmm import utils\n\nplt.style.use(\"default\")\n\n_PALETTE = sns.color_palette(n_colors=100)\n\n\n@functools.partial(jax.jit, static_argnames=(\"media_mix_model\"))\ndef _make_single_prediction(media_mix_model: lightweight_mmm.LightweightMMM,\n mock_media: jnp.ndarray,\n extra_features: Optional[jnp.ndarray],\n seed: Optional[int]\n ) -> jnp.ndarray:\n \"\"\"Makes a prediction of a single row.\n\n Serves as a helper function for making predictions individually for each media\n channel and one row at a time. It is meant to be used vmaped otherwise it can\n be slow as it's meant to be used for plotting curve responses only. Use\n lightweight_mmm.LightweightMMM for regular predict functionality.\n\n Args:\n media_mix_model: Media mix model to use for getting the predictions.\n mock_media: Mock media for this iteration of predictions.\n extra_features: Extra features to use for predictions.\n seed: Seed to use for PRNGKey during sampling. For replicability run\n this function and any other function that gets predictions with the same\n seed.\n\n Returns:\n A point estimate for the given data.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/0", "ground_truth": " return media_mix_model.predict(\n media=jnp.expand_dims(mock_media, axis=0),\n extra_features=extra_features,\n seed=seed).mean(axis=0)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "plot.py"], "context_start_lineno": 0, "lineno": 68, "function_name": "_make_single_prediction"}, "groundtruth": " return media_mix_model.predict(\n media=jnp.expand_dims(mock_media, axis=0),\n extra_features=extra_features,\n seed=seed).mean(axis=0)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Plotting functions pre and post model fitting.\"\"\"\n\nimport functools\nimport logging\n\n# Using these types from typing instead of their generic types in the type hints\n# in order to be compatible with Python 3.7 and 3.8.\nfrom typing import Any, List, Optional, Sequence, Tuple\n\nimport arviz\nimport jax\nimport jax.numpy as jnp\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport numpyro\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn import metrics\n\nfrom lightweight_mmm import lightweight_mmm\nfrom lightweight_mmm import models\nfrom lightweight_mmm import preprocessing\nfrom lightweight_mmm import utils\n\nplt.style.use(\"default\")\n\n_PALETTE = sns.color_palette(n_colors=100)\n\n\n@functools.partial(jax.jit, static_argnames=(\"media_mix_model\"))\ndef _make_single_prediction(media_mix_model: lightweight_mmm.LightweightMMM,\n mock_media: jnp.ndarray,\n extra_features: Optional[jnp.ndarray],\n seed: Optional[int]\n ) -> jnp.ndarray:\n \"\"\"Makes a prediction of a single row.\n\n Serves as a helper function for making predictions individually for each media\n channel and one row at a time. 
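Each record in this file follows the same schema visible above: a "prompt" holding the source file up to a cut point, a "metadata" object (task_id, ground_truth, fpath_tuple, context_start_lineno, lineno, function_name), and a top-level "groundtruth" that repeats the reference completion. A minimal sketch of loading such a file and reassembling one record into its original source span; the path and the printing are illustrative, not part of the dataset:

import json

def load_records(path="test.jsonl"):
    # Each non-empty line is one JSON object with "prompt",
    # "metadata" and "groundtruth" keys, as in the records above.
    with open(path, encoding="utf-8") as f:
        return [json.loads(line) for line in f if line.strip()]

def reassemble(record):
    # Concatenating the prompt with the reference completion
    # reconstructs the source span the task was extracted from.
    return record["prompt"] + record["groundtruth"]

records = load_records()
meta = records[0]["metadata"]
print(meta["task_id"], "/".join(meta["fpath_tuple"]), meta["function_name"])
print(reassemble(records[0]))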
It is meant to be used vmaped otherwise it can\n be slow as it's meant to be used for plotting curve responses only. Use\n lightweight_mmm.LightweightMMM for regular predict functionality.\n\n Args:\n media_mix_model: Media mix model to use for getting the predictions.\n mock_media: Mock media for this iteration of predictions.\n extra_features: Extra features to use for predictions.\n seed: Seed to use for PRNGKey during sampling. For replicability run\n this function and any other function that gets predictions with the same\n seed.\n\n Returns:\n A point estimate for the given data.\n \"\"\"\n return media_mix_model.predict(\n media=jnp.expand_dims(mock_media, axis=0),\n extra_features=extra_features,\n seed=seed).mean(axis=0)\n\n\n@functools.partial(\n jax.jit,\n static_argnames=(\"media_mix_model\", \"target_scaler\"))\ndef _generate_diagonal_predictions(\n media_mix_model: lightweight_mmm.LightweightMMM,\n media_values: jnp.ndarray,\n extra_features: Optional[jnp.ndarray],\n target_scaler: Optional[preprocessing.CustomScaler],\n prediction_offset: jnp.ndarray,\n seed: Optional[int]):\n \"\"\"Generates predictions for one value per channel leaving the rest to zero.\n\n This function does the following steps:\n - Vmaps the single prediction function on axis=0 of the media arg.\n - Diagonalizes the media input values so that each value is represented\n along side zeros on for the rest of the channels.\n - Generate predictions.\n - Unscale prediction if target_scaler is given.\n\n Args:\n media_mix_model: Media mix model to use for plotting the response curves.\n media_values: Media values.\n extra_features: Extra features values.\n target_scaler: Scaler used for scaling the target, to unscaled values and\n plot in the original scale.\n prediction_offset: The value of a prediction of an all zero media input.\n seed: Seed to use for PRNGKey during sampling. For replicability run\n this function and any other function that gets predictions with the same\n seed.\n\n Returns:\n The predictions for the given data.\n \"\"\"\n make_predictions = jax.vmap(fun=_make_single_prediction,\n in_axes=(None, 0, None, None))\n diagonal = jnp.eye(media_values.shape[0])\n if media_values.ndim == 2: # Only two since we only provide one row\n diagonal = jnp.expand_dims(diagonal, axis=-1)\n media_values = jnp.expand_dims(media_values, axis=0)\n diag_media_values = diagonal * media_values\n predictions = make_predictions(\n media_mix_model,\n diag_media_values,\n extra_features,\n seed) - prediction_offset\n predictions = jnp.squeeze(predictions)\n if target_scaler:\n predictions = target_scaler.inverse_transform(predictions)\n if predictions.ndim == 2:\n predictions = jnp.sum(predictions, axis=-1)\n return predictions\n\n\ndef _calculate_number_rows_plot(n_media_channels: int, n_columns: int):\n \"\"\"Calculates the number of rows of plots needed to fit n + 1 plots in n_cols.\n\n Args:\n n_media_channels: Number of media channels. 
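The _generate_diagonal_predictions helper quoted in full above isolates one media channel at a time by multiplying a row of media values against an identity matrix. A standalone NumPy sketch of just that diagonalization step, with a made-up three-channel row:

import numpy as np

media_values = np.array([10.0, 20.0, 30.0])   # one row, three channels
diagonal = np.eye(media_values.shape[0])      # (3, 3) identity

# Row i keeps channel i's value and zeroes the other channels:
diag_media_values = diagonal * media_values
print(diag_media_values)
# [[10.  0.  0.]
#  [ 0. 20.  0.]
#  [ 0.  0. 30.]]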
The total of plots needed is\n n_media_channels + 1.\n n_columns: Number of columns in the plot grid.\n\n Returns:\n The number of rows of plots needed to fit n + 1 plots in n cols\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/1", "ground_truth": " if n_media_channels % n_columns == 0:\n return n_media_channels // n_columns + 1\n return n_media_channels // n_columns + 2\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "plot.py"], "context_start_lineno": 0, "lineno": 138, "function_name": "_calculate_number_rows_plot"}, "groundtruth": " if n_media_channels % n_columns == 0:\n return n_media_channels // n_columns + 1\n return n_media_channels // n_columns + 2\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Plotting functions pre and post model fitting.\"\"\"\n\nimport functools\nimport logging\n\n# Using these types from typing instead of their generic types in the type hints\n# in order to be compatible with Python 3.7 and 3.8.\nfrom typing import Any, List, Optional, Sequence, Tuple\n\nimport arviz\nimport jax\nimport jax.numpy as jnp\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport numpyro\nimport pandas as pd\nimport seaborn as sns\nfrom sklearn import metrics\n\nfrom lightweight_mmm import lightweight_mmm\nfrom lightweight_mmm import models\nfrom lightweight_mmm import preprocessing\nfrom lightweight_mmm import utils\n\nplt.style.use(\"default\")\n\n_PALETTE = sns.color_palette(n_colors=100)\n\n\n@functools.partial(jax.jit, static_argnames=(\"media_mix_model\"))\ndef _make_single_prediction(media_mix_model: lightweight_mmm.LightweightMMM,\n mock_media: jnp.ndarray,\n extra_features: Optional[jnp.ndarray],\n seed: Optional[int]\n ) -> jnp.ndarray:\n \"\"\"Makes a prediction of a single row.\n\n Serves as a helper function for making predictions individually for each media\n channel and one row at a time. It is meant to be used vmaped otherwise it can\n be slow as it's meant to be used for plotting curve responses only. Use\n lightweight_mmm.LightweightMMM for regular predict functionality.\n\n Args:\n media_mix_model: Media mix model to use for getting the predictions.\n mock_media: Mock media for this iteration of predictions.\n extra_features: Extra features to use for predictions.\n seed: Seed to use for PRNGKey during sampling. 
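The _calculate_number_rows_plot completion shown above reserves grid space for n_media_channels + 1 plots, the extra one being the combined response-curve plot that gets a full row to itself. A quick check of the arithmetic, mirroring the groundtruth:

def calculate_number_rows_plot(n_media_channels, n_columns):
    # Mirrors the groundtruth above: one extra row is always reserved
    # for the combined plot that spans a row of its own.
    if n_media_channels % n_columns == 0:
        return n_media_channels // n_columns + 1
    return n_media_channels // n_columns + 2

# 6 channels in 3 columns -> 2 full rows of subplots + 1 combined row = 3
assert calculate_number_rows_plot(6, 3) == 3
# 7 channels in 3 columns -> 3 rows hold the 7 subplots + 1 combined row = 4
assert calculate_number_rows_plot(7, 3) == 4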
For replicability run\n this function and any other function that gets predictions with the same\n seed.\n\n Returns:\n A point estimate for the given data.\n \"\"\"\n return media_mix_model.predict(\n media=jnp.expand_dims(mock_media, axis=0),\n extra_features=extra_features,\n seed=seed).mean(axis=0)\n\n\n@functools.partial(\n jax.jit,\n static_argnames=(\"media_mix_model\", \"target_scaler\"))\ndef _generate_diagonal_predictions(\n media_mix_model: lightweight_mmm.LightweightMMM,\n media_values: jnp.ndarray,\n extra_features: Optional[jnp.ndarray],\n target_scaler: Optional[preprocessing.CustomScaler],\n prediction_offset: jnp.ndarray,\n seed: Optional[int]):\n \"\"\"Generates predictions for one value per channel leaving the rest to zero.\n\n This function does the following steps:\n - Vmaps the single prediction function on axis=0 of the media arg.\n - Diagonalizes the media input values so that each value is represented\n along side zeros on for the rest of the channels.\n - Generate predictions.\n - Unscale prediction if target_scaler is given.\n\n Args:\n media_mix_model: Media mix model to use for plotting the response curves.\n media_values: Media values.\n extra_features: Extra features values.\n target_scaler: Scaler used for scaling the target, to unscaled values and\n plot in the original scale.\n prediction_offset: The value of a prediction of an all zero media input.\n seed: Seed to use for PRNGKey during sampling. For replicability run\n this function and any other function that gets predictions with the same\n seed.\n\n Returns:\n The predictions for the given data.\n \"\"\"\n make_predictions = jax.vmap(fun=_make_single_prediction,\n in_axes=(None, 0, None, None))\n diagonal = jnp.eye(media_values.shape[0])\n if media_values.ndim == 2: # Only two since we only provide one row\n diagonal = jnp.expand_dims(diagonal, axis=-1)\n media_values = jnp.expand_dims(media_values, axis=0)\n diag_media_values = diagonal * media_values\n predictions = make_predictions(\n media_mix_model,\n diag_media_values,\n extra_features,\n seed) - prediction_offset\n predictions = jnp.squeeze(predictions)\n if target_scaler:\n predictions = target_scaler.inverse_transform(predictions)\n if predictions.ndim == 2:\n predictions = jnp.sum(predictions, axis=-1)\n return predictions\n\n\ndef _calculate_number_rows_plot(n_media_channels: int, n_columns: int):\n \"\"\"Calculates the number of rows of plots needed to fit n + 1 plots in n_cols.\n\n Args:\n n_media_channels: Number of media channels. The total of plots needed is\n n_media_channels + 1.\n n_columns: Number of columns in the plot grid.\n\n Returns:\n The number of rows of plots needed to fit n + 1 plots in n cols\n \"\"\"\n if n_media_channels % n_columns == 0:\n return n_media_channels // n_columns + 1\n return n_media_channels // n_columns + 2\n\n\ndef _calculate_media_contribution(\n media_mix_model: lightweight_mmm.LightweightMMM) -> jnp.ndarray:\n \"\"\"Computes contribution for each sample, time, channel.\n\n Serves as a helper function for making predictions for each channel, time\n and estimate sample. 
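The _calculate_media_contribution helper introduced at this point weights each posterior sample's transformed media by that sample's coef_media draw via jnp.einsum("stc, sc->stc", ...); its full completion appears in the next record. A self-contained sketch of that einsum pattern with toy shapes (the array names and sizes here are illustrative):

import numpy as np

s, t, c = 4, 10, 3  # samples, time steps, channels
media_transformed = np.random.rand(s, t, c)
coef_media = np.random.rand(s, c)

# "stc, sc->stc": multiply every time step of sample s, channel c
# by that sample's coefficient for channel c.
contribution = np.einsum("stc, sc->stc", media_transformed, coef_media)

# Equivalent to broadcasting the coefficients over the time axis:
assert np.allclose(contribution, media_transformed * coef_media[:, None, :])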
It is meant to be used in creating media baseline\n contribution dataframe and visualize media attribution over spend proportion\n plot.\n\n Args:\n media_mix_model: Media mix model.\n\n Returns:\n Estimation of contribution for each sample, time, channel.\n\n Raises:\n NotFittedModelError: if the model is not fitted before computation\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/2", "ground_truth": " if not hasattr(media_mix_model, \"trace\"):\n raise lightweight_mmm.NotFittedModelError(\n \"Model needs to be fit first before attempting to plot its fit.\")\n\n if media_mix_model.trace[\"media_transformed\"].ndim > 3:\n # s for samples, t for time, c for media channels, g for geo\n einsum_str = \"stcg, scg->stcg\"\n elif media_mix_model.trace[\"media_transformed\"].ndim == 3:\n # s for samples, t for time, c for media channels\n einsum_str = \"stc, sc->stc\"\n\n media_contribution = jnp.einsum(einsum_str,\n media_mix_model.trace[\"media_transformed\"],\n media_mix_model.trace[\"coef_media\"])\n if media_mix_model.trace[\"media_transformed\"].ndim > 3:\n # Aggregate media channel contribution across geos.\n media_contribution = media_contribution.sum(axis=-1)\n return media_contribution\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "plot.py"], "context_start_lineno": 0, "lineno": 161, "function_name": "_calculate_media_contribution"}, "groundtruth": " if not hasattr(media_mix_model, \"trace\"):\n raise lightweight_mmm.NotFittedModelError(\n \"Model needs to be fit first before attempting to plot its fit.\")\n\n if media_mix_model.trace[\"media_transformed\"].ndim > 3:\n # s for samples, t for time, c for media channels, g for geo\n einsum_str = \"stcg, scg->stcg\"\n elif media_mix_model.trace[\"media_transformed\"].ndim == 3:\n # s for samples, t for time, c for media channels\n einsum_str = \"stc, sc->stc\"\n\n media_contribution = jnp.einsum(einsum_str,\n media_mix_model.trace[\"media_transformed\"],\n media_mix_model.trace[\"coef_media\"])\n if media_mix_model.trace[\"media_transformed\"].ndim > 3:\n # Aggregate media channel contribution across geos.\n media_contribution = media_contribution.sum(axis=-1)\n return media_contribution\n"} +{"prompt": " # Create media contribution matrix.\n scaled_media_contribution = _calculate_media_contribution(media_mix_model)\n\n # Aggregate media channel contribution across samples.\n sum_scaled_media_contribution_across_samples = scaled_media_contribution.sum(\n axis=0)\n # Aggregate media channel contribution across channels.\n sum_scaled_media_contribution_across_channels = scaled_media_contribution.sum(\n axis=2)\n\n # Calculate the baseline contribution.\n # Scaled prediction - sum of scaled contribution across channels.\n scaled_prediction = media_mix_model.trace[\"mu\"]\n if media_mix_model.trace[\"media_transformed\"].ndim > 3:\n # Sum up the scaled prediction across all the geos.\n scaled_prediction = scaled_prediction.sum(axis=-1)\n baseline_contribution = scaled_prediction - sum_scaled_media_contribution_across_channels\n\n # Sum up the scaled media, baseline contribution and predictio across samples.\n sum_scaled_media_contribution_across_channels_samples = sum_scaled_media_contribution_across_channels.sum(\n axis=0)\n sum_scaled_baseline_contribution_across_samples = baseline_contribution.sum(\n axis=0)\n\n # Adjust baseline contribution and prediction when there's any negative value.\n adjusted_sum_scaled_baseline_contribution_across_samples = np.where(\n sum_scaled_baseline_contribution_across_samples < 
0, 0,\n sum_scaled_baseline_contribution_across_samples)\n adjusted_sum_scaled_prediction_across_samples = adjusted_sum_scaled_baseline_contribution_across_samples + sum_scaled_media_contribution_across_channels_samples\n\n # Calculate the media and baseline pct.\n # Media/baseline contribution across samples/total prediction across samples.\n media_contribution_pct_by_channel = (\n sum_scaled_media_contribution_across_samples /\n adjusted_sum_scaled_prediction_across_samples.reshape(-1, 1))\n # Adjust media pct contribution if the value is nan\n media_contribution_pct_by_channel = np.nan_to_num(\n media_contribution_pct_by_channel)\n\n baseline_contribution_pct = adjusted_sum_scaled_baseline_contribution_across_samples / adjusted_sum_scaled_prediction_across_samples\n # Adjust baseline pct contribution if the value is nan\n baseline_contribution_pct = np.nan_to_num(\n baseline_contribution_pct)\n\n # If the channel_names is none, then create naming covention for the channels.\n if channel_names is None:\n channel_names = media_mix_model.media_names\n\n # Create media/baseline contribution pct as dataframes.\n media_contribution_pct_by_channel_df = pd.DataFrame(\n media_contribution_pct_by_channel, columns=channel_names)\n baseline_contribution_pct_df = pd.DataFrame(\n baseline_contribution_pct, columns=[\"baseline\"])\n contribution_pct_df = pd.merge(\n media_contribution_pct_by_channel_df,\n baseline_contribution_pct_df,\n left_index=True,\n right_index=True)\n\n # If there's target scaler then inverse transform the posterior prediction.\n posterior_pred = media_mix_model.trace[\"mu\"]\n if target_scaler:\n posterior_pred = target_scaler.inverse_transform(posterior_pred)\n\n # Take the sum of posterior predictions across geos.\n if media_mix_model.trace[\"media_transformed\"].ndim > 3:\n posterior_pred = posterior_pred.sum(axis=-1)\n\n # Take the average of the inverse transformed prediction across samples.\n posterior_pred_df = pd.DataFrame(\n posterior_pred.mean(axis=0), columns=[\"avg_prediction\"])\n\n # Adjust prediction value when prediction is less than 0.\n posterior_pred_df[\"avg_prediction\"] = np.where(\n posterior_pred_df[\"avg_prediction\"] < 0, 0,\n posterior_pred_df[\"avg_prediction\"])\n\n contribution_pct_df.columns = [\n \"{}_percentage\".format(col) for col in contribution_pct_df.columns\n ]\n contribution_df = pd.merge(\n contribution_pct_df, posterior_pred_df, left_index=True, right_index=True)\n\n # Create contribution by multiplying average prediction by media/baseline pct.\n for channel in channel_names:\n channel_contribution_col_name = \"{} contribution\".format(channel)\n channel_pct_col = \"{}_percentage\".format(channel)\n contribution_df.loc[:, channel_contribution_col_name] = contribution_df[\n channel_pct_col] * contribution_df[\"avg_prediction\"]\n contribution_df.loc[:, channel_contribution_col_name] = contribution_df[\n channel_contribution_col_name].astype(\"float\")\n contribution_df.loc[:, \"baseline contribution\"] = contribution_df[\n \"baseline_percentage\"] * contribution_df[\"avg_prediction\"]\n\n period = np.arange(1, contribution_df.shape[0] + 1)\n contribution_df.loc[:, \"period\"] = period\n return contribution_df\n\n\ndef plot_response_curves(# jax-ndarray\n media_mix_model: lightweight_mmm.LightweightMMM,\n media_scaler: Optional[preprocessing.CustomScaler] = None,\n target_scaler: Optional[preprocessing.CustomScaler] = None,\n prices: jnp.ndarray = None,\n optimal_allocation_per_timeunit: Optional[jnp.ndarray] = None,\n steps: int = 50,\n 
percentage_add: float = 0.2,\n apply_log_scale: bool = False,\n figure_size: Tuple[int, int] = (8, 10),\n n_columns: int = 3,\n marker_size: int = 8,\n legend_fontsize: int = 8,\n seed: Optional[int] = None) -> matplotlib.figure.Figure:\n \"\"\"Plots the response curves of each media channel based on the model.\n\n It plots an individual subplot for each media channel. If '\n optimal_allocation_per_timeunit is given it uses it to add markers based on\n historic average spend and the given optimal one on each of the individual\n subplots.\n\n It then plots a combined plot with all the response curves which can be\n changed to log scale if apply_log_scale is True.\n\n Args:\n media_mix_model: Media mix model to use for plotting the response curves.\n media_scaler: Scaler that was used to scale the media data before training.\n target_scaler: Scaler used for scaling the target, to unscaled values and\n plot in the original scale.\n prices: Prices to translate the media units to spend. If all your data is\n already in spend numbers you can leave this as None. If some of your data\n is media spend and others is media unit, leave the media spend with price\n 1 and add the price to the media unit channels.\n optimal_allocation_per_timeunit: Optimal allocation per time unit per media\n channel. This can be obtained by running the optimization provided by\n LightweightMMM.\n steps: Number of steps to simulate.\n percentage_add: Percentage too exceed the maximum historic spend for the\n simulation of the response curve.\n apply_log_scale: Whether to apply the log scale to the predictions (Y axis).\n When some media channels have very large scale compare to others it might\n be useful to use apply_log_scale=True. Default is False.\n figure_size: Size of the plot figure.\n n_columns: Number of columns to display in the subplots grid. Modifying this\n parameter might require to adjust figure_size accordingly for the plot\n to still have reasonable structure.\n marker_size: Size of the marker for the optimization annotations. Only\n useful if optimal_allocation_per_timeunit is not None. Default is 8.\n legend_fontsize: Legend font size for individual subplots.\n seed: Seed to use for PRNGKey during sampling. 
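plot_response_curves, whose docstring this record is quoting, sweeps each channel from zero up to its historical maximum plus percentage_add headroom, reusing the identity-matrix trick so that only one channel varies per curve. A sketch of just the sweep construction, under assumed toy shapes:

import numpy as np

steps, n_channels, percentage_add = 50, 3, 0.2
media = np.random.rand(100, n_channels)            # toy media history

media_maxes = media.max(axis=0) * (1 + percentage_add)
media_ranges = np.linspace(0, media_maxes, num=steps)   # (steps, channels)

# One (steps, channels) block per channel, zero everywhere but channel i:
diagonal = np.repeat(np.eye(n_channels), steps, axis=0).reshape(
    n_channels, steps, n_channels)
mock_media = media_ranges * diagonal               # (channels, steps, channels)
print(mock_media.shape)                            # (3, 50, 3)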
For replicability run\n this function and any other function that gets predictions with the same\n seed.\n\n Returns:\n Plots of response curves.\n \"\"\"\n if not hasattr(media_mix_model, \"trace\"):\n raise lightweight_mmm.NotFittedModelError(\n \"Model needs to be fit first before attempting to plot its response \"\n \"curves.\")\n media = media_mix_model.media\n media_maxes = media.max(axis=0) * (1 + percentage_add)\n if media_mix_model._extra_features is not None:\n extra_features = jnp.expand_dims(\n media_mix_model._extra_features.mean(axis=0), axis=0)\n else:\n extra_features = None\n media_ranges = jnp.expand_dims(\n jnp.linspace(start=0, stop=media_maxes, num=steps), axis=0)\n\n make_predictions = jax.vmap(\n jax.vmap(_make_single_prediction,\n in_axes=(None, 0, None, None),\n out_axes=0),\n in_axes=(None, 0, None, None), out_axes=1)\n diagonal = jnp.repeat(\n jnp.eye(media_mix_model.n_media_channels), steps,\n axis=0).reshape(media_mix_model.n_media_channels, steps,\n media_mix_model.n_media_channels)\n\n prediction_offset = media_mix_model.predict(\n media=jnp.zeros((1, *media.shape[1:])),\n extra_features=extra_features).mean(axis=0)\n\n if media.ndim == 3:\n diagonal = jnp.expand_dims(diagonal, axis=-1)\n prediction_offset = jnp.expand_dims(prediction_offset, axis=0)\n mock_media = media_ranges * diagonal\n predictions = jnp.squeeze(a=make_predictions(media_mix_model,\n mock_media,\n extra_features,\n seed))\n predictions = predictions - prediction_offset\n media_ranges = jnp.squeeze(media_ranges)\n if target_scaler:\n predictions = target_scaler.inverse_transform(predictions)\n\n if media_scaler:\n media_ranges = media_scaler.inverse_transform(media_ranges)\n\n if prices is not None:\n if media.ndim == 3:\n prices = jnp.expand_dims(prices, axis=-1)\n media_ranges *= prices\n\n if predictions.ndim == 3:\n media_ranges = jnp.sum(media_ranges, axis=-1)\n predictions = jnp.sum(predictions, axis=-1)\n\n if optimal_allocation_per_timeunit is not None:\n average_allocation = media_mix_model.media.mean(axis=0)\n average_allocation_predictions = _generate_diagonal_predictions(\n media_mix_model=media_mix_model,\n media_values=average_allocation,\n extra_features=extra_features,\n target_scaler=target_scaler,\n prediction_offset=prediction_offset,\n seed=seed)\n optimal_allocation_predictions = _generate_diagonal_predictions(\n media_mix_model=media_mix_model,\n media_values=optimal_allocation_per_timeunit,\n extra_features=extra_features,\n target_scaler=target_scaler,\n prediction_offset=prediction_offset,\n seed=seed)\n if media_scaler:\n average_allocation = media_scaler.inverse_transform(average_allocation)\n optimal_allocation_per_timeunit = media_scaler.inverse_transform(\n optimal_allocation_per_timeunit)\n if prices is not None:\n optimal_allocation_per_timeunit *= prices\n average_allocation *= prices\n if media.ndim == 3:\n average_allocation = jnp.sum(average_allocation, axis=-1)\n optimal_allocation_per_timeunit = jnp.sum(\n optimal_allocation_per_timeunit, axis=-1)\n\n kpi_label = \"KPI\" if target_scaler else \"Normalized KPI\"\n fig = plt.figure(media_mix_model.n_media_channels + 1,\n figsize=figure_size,\n tight_layout=True)\n n_rows = _calculate_number_rows_plot(\n n_media_channels=media_mix_model.n_media_channels, n_columns=n_columns)\n last_ax = fig.add_subplot(n_rows, 1, n_rows)\n for i in range(media_mix_model.n_media_channels):\n ax = fig.add_subplot(n_rows, n_columns, i + 1)\n sns.lineplot(\n x=media_ranges[:, i],\n y=predictions[:, i],\n 
label=media_mix_model.media_names[i],\n color=_PALETTE[i],\n ax=ax)\n sns.lineplot(\n x=media_ranges[:, i],\n y=jnp.log(predictions[:, i]) if apply_log_scale else predictions[:, i],\n label=media_mix_model.media_names[i],\n color=_PALETTE[i],\n ax=last_ax)\n if optimal_allocation_per_timeunit is not None:\n ax.plot(\n average_allocation[i],\n average_allocation_predictions[i],\n marker=\"o\",\n markersize=marker_size,\n label=\"avg_spend\",\n color=_PALETTE[i])\n ax.plot(\n optimal_allocation_per_timeunit[i],\n optimal_allocation_predictions[i],\n marker=\"x\",\n markersize=marker_size + 2,\n label=\"optimal_spend\",\n color=_PALETTE[i])\n ax.set_ylabel(kpi_label)\n ax.set_xlabel(\"Normalized Spend\" if not media_scaler else \"Spend\")\n ax.legend(fontsize=legend_fontsize)\n\n fig.suptitle(\"Response curves\", fontsize=20)\n last_ax.set_ylabel(kpi_label if not apply_log_scale else f\"log({kpi_label})\")\n last_ax.set_xlabel(\"Normalized spend per channel\"\n if not media_scaler else \"Spend per channel\")\n plt.close()\n return fig\n\n\ndef plot_cross_correlate(feature: jnp.ndarray,\n target: jnp.ndarray,\n maxlags: int = 10) -> Tuple[int, float]:\n \"\"\"Plots the cross correlation coefficients between 2 vectors.\n\n In the chart look for positive peaks, this shows how the lags of the feature\n lead the target.\n\n Args:\n feature: Vector, the lags of which predict target.\n target: Vector, what is predicted.\n maxlags: Maximum number of lags.\n\n Returns:\n Lag index and corresponding correlation of the peak correlation.\n\n Raises:\n ValueError: If inputs don't have same length.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/3", "ground_truth": " if len(feature) != len(target):\n raise ValueError(\"feature and target need to have the same length.\")\n maxlags = jnp.minimum(len(feature) - 1, maxlags)\n mean_feature, mean_target = feature.mean(), target.mean()\n plot = plt.xcorr(\n x=feature - mean_feature, y=target - mean_target, maxlags=maxlags)\n plt.show()\n maxidx = plot[1][plot[0] <= 0].argmax()\n return plot[0][maxidx], plot[1][maxidx]\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "plot.py"], "context_start_lineno": 268, "lineno": 571, "function_name": "plot_cross_correlate"}, "groundtruth": " if len(feature) != len(target):\n raise ValueError(\"feature and target need to have the same length.\")\n maxlags = jnp.minimum(len(feature) - 1, maxlags)\n mean_feature, mean_target = feature.mean(), target.mean()\n plot = plt.xcorr(\n x=feature - mean_feature, y=target - mean_target, maxlags=maxlags)\n plt.show()\n maxidx = plot[1][plot[0] <= 0].argmax()\n return plot[0][maxidx], plot[1][maxidx]\n"} +{"prompt": "_channels_samples = sum_scaled_media_contribution_across_channels.sum(\n axis=0)\n sum_scaled_baseline_contribution_across_samples = baseline_contribution.sum(\n axis=0)\n\n # Adjust baseline contribution and prediction when there's any negative value.\n adjusted_sum_scaled_baseline_contribution_across_samples = np.where(\n sum_scaled_baseline_contribution_across_samples < 0, 0,\n sum_scaled_baseline_contribution_across_samples)\n adjusted_sum_scaled_prediction_across_samples = adjusted_sum_scaled_baseline_contribution_across_samples + sum_scaled_media_contribution_across_channels_samples\n\n # Calculate the media and baseline pct.\n # Media/baseline contribution across samples/total prediction across samples.\n media_contribution_pct_by_channel = (\n sum_scaled_media_contribution_across_samples /\n 
adjusted_sum_scaled_prediction_across_samples.reshape(-1, 1))\n # Adjust media pct contribution if the value is nan\n media_contribution_pct_by_channel = np.nan_to_num(\n media_contribution_pct_by_channel)\n\n baseline_contribution_pct = adjusted_sum_scaled_baseline_contribution_across_samples / adjusted_sum_scaled_prediction_across_samples\n # Adjust baseline pct contribution if the value is nan\n baseline_contribution_pct = np.nan_to_num(\n baseline_contribution_pct)\n\n # If the channel_names is none, then create naming covention for the channels.\n if channel_names is None:\n channel_names = media_mix_model.media_names\n\n # Create media/baseline contribution pct as dataframes.\n media_contribution_pct_by_channel_df = pd.DataFrame(\n media_contribution_pct_by_channel, columns=channel_names)\n baseline_contribution_pct_df = pd.DataFrame(\n baseline_contribution_pct, columns=[\"baseline\"])\n contribution_pct_df = pd.merge(\n media_contribution_pct_by_channel_df,\n baseline_contribution_pct_df,\n left_index=True,\n right_index=True)\n\n # If there's target scaler then inverse transform the posterior prediction.\n posterior_pred = media_mix_model.trace[\"mu\"]\n if target_scaler:\n posterior_pred = target_scaler.inverse_transform(posterior_pred)\n\n # Take the sum of posterior predictions across geos.\n if media_mix_model.trace[\"media_transformed\"].ndim > 3:\n posterior_pred = posterior_pred.sum(axis=-1)\n\n # Take the average of the inverse transformed prediction across samples.\n posterior_pred_df = pd.DataFrame(\n posterior_pred.mean(axis=0), columns=[\"avg_prediction\"])\n\n # Adjust prediction value when prediction is less than 0.\n posterior_pred_df[\"avg_prediction\"] = np.where(\n posterior_pred_df[\"avg_prediction\"] < 0, 0,\n posterior_pred_df[\"avg_prediction\"])\n\n contribution_pct_df.columns = [\n \"{}_percentage\".format(col) for col in contribution_pct_df.columns\n ]\n contribution_df = pd.merge(\n contribution_pct_df, posterior_pred_df, left_index=True, right_index=True)\n\n # Create contribution by multiplying average prediction by media/baseline pct.\n for channel in channel_names:\n channel_contribution_col_name = \"{} contribution\".format(channel)\n channel_pct_col = \"{}_percentage\".format(channel)\n contribution_df.loc[:, channel_contribution_col_name] = contribution_df[\n channel_pct_col] * contribution_df[\"avg_prediction\"]\n contribution_df.loc[:, channel_contribution_col_name] = contribution_df[\n channel_contribution_col_name].astype(\"float\")\n contribution_df.loc[:, \"baseline contribution\"] = contribution_df[\n \"baseline_percentage\"] * contribution_df[\"avg_prediction\"]\n\n period = np.arange(1, contribution_df.shape[0] + 1)\n contribution_df.loc[:, \"period\"] = period\n return contribution_df\n\n\ndef plot_response_curves(# jax-ndarray\n media_mix_model: lightweight_mmm.LightweightMMM,\n media_scaler: Optional[preprocessing.CustomScaler] = None,\n target_scaler: Optional[preprocessing.CustomScaler] = None,\n prices: jnp.ndarray = None,\n optimal_allocation_per_timeunit: Optional[jnp.ndarray] = None,\n steps: int = 50,\n percentage_add: float = 0.2,\n apply_log_scale: bool = False,\n figure_size: Tuple[int, int] = (8, 10),\n n_columns: int = 3,\n marker_size: int = 8,\n legend_fontsize: int = 8,\n seed: Optional[int] = None) -> matplotlib.figure.Figure:\n \"\"\"Plots the response curves of each media channel based on the model.\n\n It plots an individual subplot for each media channel. 
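The contribution dataframe assembled in this record clips negative baseline totals to zero with np.where before converting media and baseline contributions into percentages, and zeroes any NaNs produced by zero denominators, so the per-period shares stay in [0, 1] and sum to one. A compact sketch of that normalization with made-up numbers:

import numpy as np

media_total = np.array([[3.0, 1.0], [2.0, 2.0]])   # (time, channels)
baseline_total = np.array([-0.5, 4.0])             # one value per time step

# Negative baselines are treated as zero before computing shares.
baseline_adj = np.where(baseline_total < 0, 0, baseline_total)
prediction = baseline_adj + media_total.sum(axis=1)

media_pct = np.nan_to_num(media_total / prediction.reshape(-1, 1))
baseline_pct = np.nan_to_num(baseline_adj / prediction)
print(media_pct, baseline_pct)   # each row's media shares plus baseline sum to 1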
If '\n optimal_allocation_per_timeunit is given it uses it to add markers based on\n historic average spend and the given optimal one on each of the individual\n subplots.\n\n It then plots a combined plot with all the response curves which can be\n changed to log scale if apply_log_scale is True.\n\n Args:\n media_mix_model: Media mix model to use for plotting the response curves.\n media_scaler: Scaler that was used to scale the media data before training.\n target_scaler: Scaler used for scaling the target, to unscaled values and\n plot in the original scale.\n prices: Prices to translate the media units to spend. If all your data is\n already in spend numbers you can leave this as None. If some of your data\n is media spend and others is media unit, leave the media spend with price\n 1 and add the price to the media unit channels.\n optimal_allocation_per_timeunit: Optimal allocation per time unit per media\n channel. This can be obtained by running the optimization provided by\n LightweightMMM.\n steps: Number of steps to simulate.\n percentage_add: Percentage too exceed the maximum historic spend for the\n simulation of the response curve.\n apply_log_scale: Whether to apply the log scale to the predictions (Y axis).\n When some media channels have very large scale compare to others it might\n be useful to use apply_log_scale=True. Default is False.\n figure_size: Size of the plot figure.\n n_columns: Number of columns to display in the subplots grid. Modifying this\n parameter might require to adjust figure_size accordingly for the plot\n to still have reasonable structure.\n marker_size: Size of the marker for the optimization annotations. Only\n useful if optimal_allocation_per_timeunit is not None. Default is 8.\n legend_fontsize: Legend font size for individual subplots.\n seed: Seed to use for PRNGKey during sampling. 
For replicability run\n this function and any other function that gets predictions with the same\n seed.\n\n Returns:\n Plots of response curves.\n \"\"\"\n if not hasattr(media_mix_model, \"trace\"):\n raise lightweight_mmm.NotFittedModelError(\n \"Model needs to be fit first before attempting to plot its response \"\n \"curves.\")\n media = media_mix_model.media\n media_maxes = media.max(axis=0) * (1 + percentage_add)\n if media_mix_model._extra_features is not None:\n extra_features = jnp.expand_dims(\n media_mix_model._extra_features.mean(axis=0), axis=0)\n else:\n extra_features = None\n media_ranges = jnp.expand_dims(\n jnp.linspace(start=0, stop=media_maxes, num=steps), axis=0)\n\n make_predictions = jax.vmap(\n jax.vmap(_make_single_prediction,\n in_axes=(None, 0, None, None),\n out_axes=0),\n in_axes=(None, 0, None, None), out_axes=1)\n diagonal = jnp.repeat(\n jnp.eye(media_mix_model.n_media_channels), steps,\n axis=0).reshape(media_mix_model.n_media_channels, steps,\n media_mix_model.n_media_channels)\n\n prediction_offset = media_mix_model.predict(\n media=jnp.zeros((1, *media.shape[1:])),\n extra_features=extra_features).mean(axis=0)\n\n if media.ndim == 3:\n diagonal = jnp.expand_dims(diagonal, axis=-1)\n prediction_offset = jnp.expand_dims(prediction_offset, axis=0)\n mock_media = media_ranges * diagonal\n predictions = jnp.squeeze(a=make_predictions(media_mix_model,\n mock_media,\n extra_features,\n seed))\n predictions = predictions - prediction_offset\n media_ranges = jnp.squeeze(media_ranges)\n if target_scaler:\n predictions = target_scaler.inverse_transform(predictions)\n\n if media_scaler:\n media_ranges = media_scaler.inverse_transform(media_ranges)\n\n if prices is not None:\n if media.ndim == 3:\n prices = jnp.expand_dims(prices, axis=-1)\n media_ranges *= prices\n\n if predictions.ndim == 3:\n media_ranges = jnp.sum(media_ranges, axis=-1)\n predictions = jnp.sum(predictions, axis=-1)\n\n if optimal_allocation_per_timeunit is not None:\n average_allocation = media_mix_model.media.mean(axis=0)\n average_allocation_predictions = _generate_diagonal_predictions(\n media_mix_model=media_mix_model,\n media_values=average_allocation,\n extra_features=extra_features,\n target_scaler=target_scaler,\n prediction_offset=prediction_offset,\n seed=seed)\n optimal_allocation_predictions = _generate_diagonal_predictions(\n media_mix_model=media_mix_model,\n media_values=optimal_allocation_per_timeunit,\n extra_features=extra_features,\n target_scaler=target_scaler,\n prediction_offset=prediction_offset,\n seed=seed)\n if media_scaler:\n average_allocation = media_scaler.inverse_transform(average_allocation)\n optimal_allocation_per_timeunit = media_scaler.inverse_transform(\n optimal_allocation_per_timeunit)\n if prices is not None:\n optimal_allocation_per_timeunit *= prices\n average_allocation *= prices\n if media.ndim == 3:\n average_allocation = jnp.sum(average_allocation, axis=-1)\n optimal_allocation_per_timeunit = jnp.sum(\n optimal_allocation_per_timeunit, axis=-1)\n\n kpi_label = \"KPI\" if target_scaler else \"Normalized KPI\"\n fig = plt.figure(media_mix_model.n_media_channels + 1,\n figsize=figure_size,\n tight_layout=True)\n n_rows = _calculate_number_rows_plot(\n n_media_channels=media_mix_model.n_media_channels, n_columns=n_columns)\n last_ax = fig.add_subplot(n_rows, 1, n_rows)\n for i in range(media_mix_model.n_media_channels):\n ax = fig.add_subplot(n_rows, n_columns, i + 1)\n sns.lineplot(\n x=media_ranges[:, i],\n y=predictions[:, i],\n 
label=media_mix_model.media_names[i],\n color=_PALETTE[i],\n ax=ax)\n sns.lineplot(\n x=media_ranges[:, i],\n y=jnp.log(predictions[:, i]) if apply_log_scale else predictions[:, i],\n label=media_mix_model.media_names[i],\n color=_PALETTE[i],\n ax=last_ax)\n if optimal_allocation_per_timeunit is not None:\n ax.plot(\n average_allocation[i],\n average_allocation_predictions[i],\n marker=\"o\",\n markersize=marker_size,\n label=\"avg_spend\",\n color=_PALETTE[i])\n ax.plot(\n optimal_allocation_per_timeunit[i],\n optimal_allocation_predictions[i],\n marker=\"x\",\n markersize=marker_size + 2,\n label=\"optimal_spend\",\n color=_PALETTE[i])\n ax.set_ylabel(kpi_label)\n ax.set_xlabel(\"Normalized Spend\" if not media_scaler else \"Spend\")\n ax.legend(fontsize=legend_fontsize)\n\n fig.suptitle(\"Response curves\", fontsize=20)\n last_ax.set_ylabel(kpi_label if not apply_log_scale else f\"log({kpi_label})\")\n last_ax.set_xlabel(\"Normalized spend per channel\"\n if not media_scaler else \"Spend per channel\")\n plt.close()\n return fig\n\n\ndef plot_cross_correlate(feature: jnp.ndarray,\n target: jnp.ndarray,\n maxlags: int = 10) -> Tuple[int, float]:\n \"\"\"Plots the cross correlation coefficients between 2 vectors.\n\n In the chart look for positive peaks, this shows how the lags of the feature\n lead the target.\n\n Args:\n feature: Vector, the lags of which predict target.\n target: Vector, what is predicted.\n maxlags: Maximum number of lags.\n\n Returns:\n Lag index and corresponding correlation of the peak correlation.\n\n Raises:\n ValueError: If inputs don't have same length.\n \"\"\"\n if len(feature) != len(target):\n raise ValueError(\"feature and target need to have the same length.\")\n maxlags = jnp.minimum(len(feature) - 1, maxlags)\n mean_feature, mean_target = feature.mean(), target.mean()\n plot = plt.xcorr(\n x=feature - mean_feature, y=target - mean_target, maxlags=maxlags)\n plt.show()\n maxidx = plot[1][plot[0] <= 0].argmax()\n return plot[0][maxidx], plot[1][maxidx]\n\n\ndef plot_var_cost(media: jnp.ndarray, costs: jnp.ndarray,\n names: List[str]) -> matplotlib.figure.Figure:\n \"\"\"Plots a a chart between the coefficient of variation and cost.\n\n Args:\n media: Media matrix.\n costs: Cost vector.\n names: List of variable names.\n\n Returns:\n Plot of coefficient of variation and cost.\n\n Raises:\n ValueError if inputs don't conform to same length.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/4", "ground_truth": " if media.shape[1] != len(costs):\n raise ValueError(\"media columns and costs needs to have same length.\")\n if media.shape[1] != len(names):\n raise ValueError(\"media columns and names needs to have same length.\")\n coef_of_variation = media.std(axis=0) / media.mean(axis=0)\n\n fig, ax = plt.subplots(1, 1)\n ax.scatter(x=costs, y=coef_of_variation)\n # https://queirozf.com/entries/add-labels-and-text-to-matplotlib-plots-annotation-examples.\n for i in range(len(costs)):\n x, y, label = costs[i], coef_of_variation[i], names[i]\n ax.annotate(text=label, xy=(x, y))\n ax.set_xlabel(\"Cost\")\n ax.set_ylabel(\"Coef of Variation\")\n plt.close()\n return fig\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "plot.py"], "context_start_lineno": 287, "lineno": 597, "function_name": "plot_var_cost"}, "groundtruth": " if media.shape[1] != len(costs):\n raise ValueError(\"media columns and costs needs to have same length.\")\n if media.shape[1] != len(names):\n raise ValueError(\"media columns and names needs to have same 
length.\")\n coef_of_variation = media.std(axis=0) / media.mean(axis=0)\n\n fig, ax = plt.subplots(1, 1)\n ax.scatter(x=costs, y=coef_of_variation)\n # https://queirozf.com/entries/add-labels-and-text-to-matplotlib-plots-annotation-examples.\n for i in range(len(costs)):\n x, y, label = costs[i], coef_of_variation[i], names[i]\n ax.annotate(text=label, xy=(x, y))\n ax.set_xlabel(\"Cost\")\n ax.set_ylabel(\"Coef of Variation\")\n plt.close()\n return fig\n"} +{"prompt": "aler] = None,\n target_scaler: Optional[preprocessing.CustomScaler] = None,\n prices: jnp.ndarray = None,\n optimal_allocation_per_timeunit: Optional[jnp.ndarray] = None,\n steps: int = 50,\n percentage_add: float = 0.2,\n apply_log_scale: bool = False,\n figure_size: Tuple[int, int] = (8, 10),\n n_columns: int = 3,\n marker_size: int = 8,\n legend_fontsize: int = 8,\n seed: Optional[int] = None) -> matplotlib.figure.Figure:\n \"\"\"Plots the response curves of each media channel based on the model.\n\n It plots an individual subplot for each media channel. If '\n optimal_allocation_per_timeunit is given it uses it to add markers based on\n historic average spend and the given optimal one on each of the individual\n subplots.\n\n It then plots a combined plot with all the response curves which can be\n changed to log scale if apply_log_scale is True.\n\n Args:\n media_mix_model: Media mix model to use for plotting the response curves.\n media_scaler: Scaler that was used to scale the media data before training.\n target_scaler: Scaler used for scaling the target, to unscaled values and\n plot in the original scale.\n prices: Prices to translate the media units to spend. If all your data is\n already in spend numbers you can leave this as None. If some of your data\n is media spend and others is media unit, leave the media spend with price\n 1 and add the price to the media unit channels.\n optimal_allocation_per_timeunit: Optimal allocation per time unit per media\n channel. This can be obtained by running the optimization provided by\n LightweightMMM.\n steps: Number of steps to simulate.\n percentage_add: Percentage too exceed the maximum historic spend for the\n simulation of the response curve.\n apply_log_scale: Whether to apply the log scale to the predictions (Y axis).\n When some media channels have very large scale compare to others it might\n be useful to use apply_log_scale=True. Default is False.\n figure_size: Size of the plot figure.\n n_columns: Number of columns to display in the subplots grid. Modifying this\n parameter might require to adjust figure_size accordingly for the plot\n to still have reasonable structure.\n marker_size: Size of the marker for the optimization annotations. Only\n useful if optimal_allocation_per_timeunit is not None. Default is 8.\n legend_fontsize: Legend font size for individual subplots.\n seed: Seed to use for PRNGKey during sampling. 
For replicability run\n this function and any other function that gets predictions with the same\n seed.\n\n Returns:\n Plots of response curves.\n \"\"\"\n if not hasattr(media_mix_model, \"trace\"):\n raise lightweight_mmm.NotFittedModelError(\n \"Model needs to be fit first before attempting to plot its response \"\n \"curves.\")\n media = media_mix_model.media\n media_maxes = media.max(axis=0) * (1 + percentage_add)\n if media_mix_model._extra_features is not None:\n extra_features = jnp.expand_dims(\n media_mix_model._extra_features.mean(axis=0), axis=0)\n else:\n extra_features = None\n media_ranges = jnp.expand_dims(\n jnp.linspace(start=0, stop=media_maxes, num=steps), axis=0)\n\n make_predictions = jax.vmap(\n jax.vmap(_make_single_prediction,\n in_axes=(None, 0, None, None),\n out_axes=0),\n in_axes=(None, 0, None, None), out_axes=1)\n diagonal = jnp.repeat(\n jnp.eye(media_mix_model.n_media_channels), steps,\n axis=0).reshape(media_mix_model.n_media_channels, steps,\n media_mix_model.n_media_channels)\n\n prediction_offset = media_mix_model.predict(\n media=jnp.zeros((1, *media.shape[1:])),\n extra_features=extra_features).mean(axis=0)\n\n if media.ndim == 3:\n diagonal = jnp.expand_dims(diagonal, axis=-1)\n prediction_offset = jnp.expand_dims(prediction_offset, axis=0)\n mock_media = media_ranges * diagonal\n predictions = jnp.squeeze(a=make_predictions(media_mix_model,\n mock_media,\n extra_features,\n seed))\n predictions = predictions - prediction_offset\n media_ranges = jnp.squeeze(media_ranges)\n if target_scaler:\n predictions = target_scaler.inverse_transform(predictions)\n\n if media_scaler:\n media_ranges = media_scaler.inverse_transform(media_ranges)\n\n if prices is not None:\n if media.ndim == 3:\n prices = jnp.expand_dims(prices, axis=-1)\n media_ranges *= prices\n\n if predictions.ndim == 3:\n media_ranges = jnp.sum(media_ranges, axis=-1)\n predictions = jnp.sum(predictions, axis=-1)\n\n if optimal_allocation_per_timeunit is not None:\n average_allocation = media_mix_model.media.mean(axis=0)\n average_allocation_predictions = _generate_diagonal_predictions(\n media_mix_model=media_mix_model,\n media_values=average_allocation,\n extra_features=extra_features,\n target_scaler=target_scaler,\n prediction_offset=prediction_offset,\n seed=seed)\n optimal_allocation_predictions = _generate_diagonal_predictions(\n media_mix_model=media_mix_model,\n media_values=optimal_allocation_per_timeunit,\n extra_features=extra_features,\n target_scaler=target_scaler,\n prediction_offset=prediction_offset,\n seed=seed)\n if media_scaler:\n average_allocation = media_scaler.inverse_transform(average_allocation)\n optimal_allocation_per_timeunit = media_scaler.inverse_transform(\n optimal_allocation_per_timeunit)\n if prices is not None:\n optimal_allocation_per_timeunit *= prices\n average_allocation *= prices\n if media.ndim == 3:\n average_allocation = jnp.sum(average_allocation, axis=-1)\n optimal_allocation_per_timeunit = jnp.sum(\n optimal_allocation_per_timeunit, axis=-1)\n\n kpi_label = \"KPI\" if target_scaler else \"Normalized KPI\"\n fig = plt.figure(media_mix_model.n_media_channels + 1,\n figsize=figure_size,\n tight_layout=True)\n n_rows = _calculate_number_rows_plot(\n n_media_channels=media_mix_model.n_media_channels, n_columns=n_columns)\n last_ax = fig.add_subplot(n_rows, 1, n_rows)\n for i in range(media_mix_model.n_media_channels):\n ax = fig.add_subplot(n_rows, n_columns, i + 1)\n sns.lineplot(\n x=media_ranges[:, i],\n y=predictions[:, i],\n 
label=media_mix_model.media_names[i],\n color=_PALETTE[i],\n ax=ax)\n sns.lineplot(\n x=media_ranges[:, i],\n y=jnp.log(predictions[:, i]) if apply_log_scale else predictions[:, i],\n label=media_mix_model.media_names[i],\n color=_PALETTE[i],\n ax=last_ax)\n if optimal_allocation_per_timeunit is not None:\n ax.plot(\n average_allocation[i],\n average_allocation_predictions[i],\n marker=\"o\",\n markersize=marker_size,\n label=\"avg_spend\",\n color=_PALETTE[i])\n ax.plot(\n optimal_allocation_per_timeunit[i],\n optimal_allocation_predictions[i],\n marker=\"x\",\n markersize=marker_size + 2,\n label=\"optimal_spend\",\n color=_PALETTE[i])\n ax.set_ylabel(kpi_label)\n ax.set_xlabel(\"Normalized Spend\" if not media_scaler else \"Spend\")\n ax.legend(fontsize=legend_fontsize)\n\n fig.suptitle(\"Response curves\", fontsize=20)\n last_ax.set_ylabel(kpi_label if not apply_log_scale else f\"log({kpi_label})\")\n last_ax.set_xlabel(\"Normalized spend per channel\"\n if not media_scaler else \"Spend per channel\")\n plt.close()\n return fig\n\n\ndef plot_cross_correlate(feature: jnp.ndarray,\n target: jnp.ndarray,\n maxlags: int = 10) -> Tuple[int, float]:\n \"\"\"Plots the cross correlation coefficients between 2 vectors.\n\n In the chart look for positive peaks, this shows how the lags of the feature\n lead the target.\n\n Args:\n feature: Vector, the lags of which predict target.\n target: Vector, what is predicted.\n maxlags: Maximum number of lags.\n\n Returns:\n Lag index and corresponding correlation of the peak correlation.\n\n Raises:\n ValueError: If inputs don't have same length.\n \"\"\"\n if len(feature) != len(target):\n raise ValueError(\"feature and target need to have the same length.\")\n maxlags = jnp.minimum(len(feature) - 1, maxlags)\n mean_feature, mean_target = feature.mean(), target.mean()\n plot = plt.xcorr(\n x=feature - mean_feature, y=target - mean_target, maxlags=maxlags)\n plt.show()\n maxidx = plot[1][plot[0] <= 0].argmax()\n return plot[0][maxidx], plot[1][maxidx]\n\n\ndef plot_var_cost(media: jnp.ndarray, costs: jnp.ndarray,\n names: List[str]) -> matplotlib.figure.Figure:\n \"\"\"Plots a a chart between the coefficient of variation and cost.\n\n Args:\n media: Media matrix.\n costs: Cost vector.\n names: List of variable names.\n\n Returns:\n Plot of coefficient of variation and cost.\n\n Raises:\n ValueError if inputs don't conform to same length.\n \"\"\"\n if media.shape[1] != len(costs):\n raise ValueError(\"media columns and costs needs to have same length.\")\n if media.shape[1] != len(names):\n raise ValueError(\"media columns and names needs to have same length.\")\n coef_of_variation = media.std(axis=0) / media.mean(axis=0)\n\n fig, ax = plt.subplots(1, 1)\n ax.scatter(x=costs, y=coef_of_variation)\n # https://queirozf.com/entries/add-labels-and-text-to-matplotlib-plots-annotation-examples.\n for i in range(len(costs)):\n x, y, label = costs[i], coef_of_variation[i], names[i]\n ax.annotate(text=label, xy=(x, y))\n ax.set_xlabel(\"Cost\")\n ax.set_ylabel(\"Coef of Variation\")\n plt.close()\n return fig\n\n\ndef _create_shaded_line_plot(predictions: jnp.ndarray,\n target: jnp.ndarray,\n axis: matplotlib.axes.Axes,\n title_prefix: str = \"\",\n interval_mid_range: float = .9,\n digits: int = 3) -> None:\n \"\"\"Creates a plot of ground truth, predicted value and credibility interval.\n\n Args:\n predictions: 2d array of predicted values.\n target: Array of true values. 
Must be same length as predictions.\n axis: Matplotlib axis in which to plot the data.\n title_prefix: Prefix to add as the label of the plot.\n interval_mid_range: Mid range interval to take for plotting. Eg. .9 will use\n .05 and .95 as the lower and upper quantiles. Must be a float number\n between 0 and 1.\n digits: Number of decimals to display on metrics in the plot.\n \"\"\"\n if predictions.shape[1] != len(target):\n raise ValueError(\n \"Predicted data and ground-truth data must have same length.\")\n upper_quantile = 1 - (1 - interval_mid_range) / 2\n lower_quantile = (1 - interval_mid_range) / 2\n upper_bound = jnp.quantile(a=predictions, q=upper_quantile, axis=0)\n lower_bound = jnp.quantile(a=predictions, q=lower_quantile, axis=0)\n\n r2, _ = arviz.r2_score(y_true=target, y_pred=predictions)\n mape = 100 * metrics.mean_absolute_percentage_error(\n y_true=target, y_pred=predictions.mean(axis=0))\n axis.plot(jnp.arange(target.shape[0]), target, c=\"grey\", alpha=.9)\n axis.plot(\n jnp.arange(target.shape[0]),\n predictions.mean(axis=0),\n c=\"green\",\n alpha=.9)\n axis.fill_between(\n x=jnp.arange(target.shape[0]),\n y1=lower_bound,\n y2=upper_bound,\n alpha=.35,\n color=\"green\")\n axis.legend([\"True KPI\", \"Predicted KPI\"])\n axis.yaxis.grid(color=\"gray\", linestyle=\"dashed\", alpha=0.3)\n axis.xaxis.grid(color=\"gray\", linestyle=\"dashed\", alpha=0.3)\n title = \" \".join([\n title_prefix,\n \"True and predicted KPI.\",\n \"R2 = {r2:.{digits}f}\".format(r2=r2, digits=digits),\n \"MAPE = {mape:.{digits}f}%\".format(mape=mape, digits=digits)\n ])\n axis.title.set_text(title)\n plt.close()\n\n\ndef _call_fit_plotter(\n predictions: jnp.array,\n target: jnp.array,\n interval_mid_range: float,\n digits: int) -> matplotlib.figure.Figure:\n \"\"\"Calls the shaded line plot once for national and N times for geo models.\n\n Args:\n predictions: 2d array of predicted values.\n target: Array of true values. Must be same length as prediction.\n interval_mid_range: Mid range interval to take for plotting. Eg. .9 will use\n .05 and .95 as the lower and upper quantiles. 
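_create_shaded_line_plot, quoted in full in this record, turns interval_mid_range into symmetric quantiles: a value of .9 becomes the .05/.95 band drawn with fill_between around the mean prediction. A sketch of the quantile arithmetic on toy posterior draws:

import numpy as np
import matplotlib.pyplot as plt

interval_mid_range = 0.9
upper_q = 1 - (1 - interval_mid_range) / 2    # 0.95
lower_q = (1 - interval_mid_range) / 2        # 0.05

predictions = np.random.randn(500, 60).cumsum(axis=1)  # (samples, time)
upper = np.quantile(predictions, upper_q, axis=0)
lower = np.quantile(predictions, lower_q, axis=0)

fig, ax = plt.subplots()
t = np.arange(predictions.shape[1])
ax.plot(t, predictions.mean(axis=0), c="green")
ax.fill_between(t, lower, upper, alpha=.35, color="green")  # 90% band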
Must be a float number\n between 0 and 1.\n digits: Number of decimals to display on metrics in the plot.\n\n Returns:\n Figure of the plot.\n \"\"\"\n # TODO(): Allow to pass geo names for fit plots", "metadata": {"task_id": "google--lightweight_mmm/5", "ground_truth": " if predictions.ndim == 3: # Multiple plots for geo model\n figure, axes = plt.subplots(predictions.shape[-1],\n figsize=(10, 5 * predictions.shape[-1]))\n for i, ax in enumerate(axes):\n _create_shaded_line_plot(predictions=predictions[..., i],\n target=target[..., i],\n axis=ax,\n title_prefix=f\"Geo {i}:\",\n interval_mid_range=interval_mid_range,\n digits=digits)\n else: # Single plot for national model\n figure, ax = plt.subplots(1, 1)\n _create_shaded_line_plot(predictions=predictions,\n target=target,\n axis=ax,\n interval_mid_range=interval_mid_range,\n digits=digits)\n return figure\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "plot.py"], "context_start_lineno": 369, "lineno": 688, "function_name": "_call_fit_plotter"}, "groundtruth": " if predictions.ndim == 3: # Multiple plots for geo model\n figure, axes = plt.subplots(predictions.shape[-1],\n figsize=(10, 5 * predictions.shape[-1]))\n for i, ax in enumerate(axes):\n _create_shaded_line_plot(predictions=predictions[..., i],\n target=target[..., i],\n axis=ax,\n title_prefix=f\"Geo {i}:\",\n interval_mid_range=interval_mid_range,\n digits=digits)\n else: # Single plot for national model\n figure, ax = plt.subplots(1, 1)\n _create_shaded_line_plot(predictions=predictions,\n target=target,\n axis=ax,\n interval_mid_range=interval_mid_range,\n digits=digits)\n return figure\n"} +{"prompt": " historic spend for the\n simulation of the response curve.\n apply_log_scale: Whether to apply the log scale to the predictions (Y axis).\n When some media channels have very large scale compare to others it might\n be useful to use apply_log_scale=True. Default is False.\n figure_size: Size of the plot figure.\n n_columns: Number of columns to display in the subplots grid. Modifying this\n parameter might require to adjust figure_size accordingly for the plot\n to still have reasonable structure.\n marker_size: Size of the marker for the optimization annotations. Only\n useful if optimal_allocation_per_timeunit is not None. Default is 8.\n legend_fontsize: Legend font size for individual subplots.\n seed: Seed to use for PRNGKey during sampling. 
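The _call_fit_plotter completion in this record dispatches on predictions.ndim: a trailing geo axis yields one subplot per geo, otherwise a single national plot is drawn. A minimal sketch of that dispatch with a stand-in plotting callback; everything here is illustrative rather than the library's API:

import numpy as np
import matplotlib.pyplot as plt

def plot_per_geo(predictions, target, plot_one):
    # predictions: (samples, time) for a national model,
    # (samples, time, geos) for a geo model.
    if predictions.ndim == 3:
        n_geos = predictions.shape[-1]
        fig, axes = plt.subplots(n_geos, figsize=(10, 5 * n_geos))
        for i, ax in enumerate(axes):
            plot_one(predictions[..., i], target[..., i], ax)
    else:
        fig, ax = plt.subplots(1, 1)
        plot_one(predictions, target, ax)
    return fig

def draw(p, t, ax):
    # Stand-in callback: target series and mean prediction.
    ax.plot(t, c="grey")
    ax.plot(p.mean(axis=0), c="green")

fig = plot_per_geo(np.random.rand(100, 30, 2), np.random.rand(30, 2), draw)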
For replicability run\n this function and any other function that gets predictions with the same\n seed.\n\n Returns:\n Plots of response curves.\n \"\"\"\n if not hasattr(media_mix_model, \"trace\"):\n raise lightweight_mmm.NotFittedModelError(\n \"Model needs to be fit first before attempting to plot its response \"\n \"curves.\")\n media = media_mix_model.media\n media_maxes = media.max(axis=0) * (1 + percentage_add)\n if media_mix_model._extra_features is not None:\n extra_features = jnp.expand_dims(\n media_mix_model._extra_features.mean(axis=0), axis=0)\n else:\n extra_features = None\n media_ranges = jnp.expand_dims(\n jnp.linspace(start=0, stop=media_maxes, num=steps), axis=0)\n\n make_predictions = jax.vmap(\n jax.vmap(_make_single_prediction,\n in_axes=(None, 0, None, None),\n out_axes=0),\n in_axes=(None, 0, None, None), out_axes=1)\n diagonal = jnp.repeat(\n jnp.eye(media_mix_model.n_media_channels), steps,\n axis=0).reshape(media_mix_model.n_media_channels, steps,\n media_mix_model.n_media_channels)\n\n prediction_offset = media_mix_model.predict(\n media=jnp.zeros((1, *media.shape[1:])),\n extra_features=extra_features).mean(axis=0)\n\n if media.ndim == 3:\n diagonal = jnp.expand_dims(diagonal, axis=-1)\n prediction_offset = jnp.expand_dims(prediction_offset, axis=0)\n mock_media = media_ranges * diagonal\n predictions = jnp.squeeze(a=make_predictions(media_mix_model,\n mock_media,\n extra_features,\n seed))\n predictions = predictions - prediction_offset\n media_ranges = jnp.squeeze(media_ranges)\n if target_scaler:\n predictions = target_scaler.inverse_transform(predictions)\n\n if media_scaler:\n media_ranges = media_scaler.inverse_transform(media_ranges)\n\n if prices is not None:\n if media.ndim == 3:\n prices = jnp.expand_dims(prices, axis=-1)\n media_ranges *= prices\n\n if predictions.ndim == 3:\n media_ranges = jnp.sum(media_ranges, axis=-1)\n predictions = jnp.sum(predictions, axis=-1)\n\n if optimal_allocation_per_timeunit is not None:\n average_allocation = media_mix_model.media.mean(axis=0)\n average_allocation_predictions = _generate_diagonal_predictions(\n media_mix_model=media_mix_model,\n media_values=average_allocation,\n extra_features=extra_features,\n target_scaler=target_scaler,\n prediction_offset=prediction_offset,\n seed=seed)\n optimal_allocation_predictions = _generate_diagonal_predictions(\n media_mix_model=media_mix_model,\n media_values=optimal_allocation_per_timeunit,\n extra_features=extra_features,\n target_scaler=target_scaler,\n prediction_offset=prediction_offset,\n seed=seed)\n if media_scaler:\n average_allocation = media_scaler.inverse_transform(average_allocation)\n optimal_allocation_per_timeunit = media_scaler.inverse_transform(\n optimal_allocation_per_timeunit)\n if prices is not None:\n optimal_allocation_per_timeunit *= prices\n average_allocation *= prices\n if media.ndim == 3:\n average_allocation = jnp.sum(average_allocation, axis=-1)\n optimal_allocation_per_timeunit = jnp.sum(\n optimal_allocation_per_timeunit, axis=-1)\n\n kpi_label = \"KPI\" if target_scaler else \"Normalized KPI\"\n fig = plt.figure(media_mix_model.n_media_channels + 1,\n figsize=figure_size,\n tight_layout=True)\n n_rows = _calculate_number_rows_plot(\n n_media_channels=media_mix_model.n_media_channels, n_columns=n_columns)\n last_ax = fig.add_subplot(n_rows, 1, n_rows)\n for i in range(media_mix_model.n_media_channels):\n ax = fig.add_subplot(n_rows, n_columns, i + 1)\n sns.lineplot(\n x=media_ranges[:, i],\n y=predictions[:, i],\n 
label=media_mix_model.media_names[i],\n color=_PALETTE[i],\n ax=ax)\n sns.lineplot(\n x=media_ranges[:, i],\n y=jnp.log(predictions[:, i]) if apply_log_scale else predictions[:, i],\n label=media_mix_model.media_names[i],\n color=_PALETTE[i],\n ax=last_ax)\n if optimal_allocation_per_timeunit is not None:\n ax.plot(\n average_allocation[i],\n average_allocation_predictions[i],\n marker=\"o\",\n markersize=marker_size,\n label=\"avg_spend\",\n color=_PALETTE[i])\n ax.plot(\n optimal_allocation_per_timeunit[i],\n optimal_allocation_predictions[i],\n marker=\"x\",\n markersize=marker_size + 2,\n label=\"optimal_spend\",\n color=_PALETTE[i])\n ax.set_ylabel(kpi_label)\n ax.set_xlabel(\"Normalized Spend\" if not media_scaler else \"Spend\")\n ax.legend(fontsize=legend_fontsize)\n\n fig.suptitle(\"Response curves\", fontsize=20)\n last_ax.set_ylabel(kpi_label if not apply_log_scale else f\"log({kpi_label})\")\n last_ax.set_xlabel(\"Normalized spend per channel\"\n if not media_scaler else \"Spend per channel\")\n plt.close()\n return fig\n\n\ndef plot_cross_correlate(feature: jnp.ndarray,\n target: jnp.ndarray,\n maxlags: int = 10) -> Tuple[int, float]:\n \"\"\"Plots the cross correlation coefficients between 2 vectors.\n\n In the chart look for positive peaks, this shows how the lags of the feature\n lead the target.\n\n Args:\n feature: Vector, the lags of which predict target.\n target: Vector, what is predicted.\n maxlags: Maximum number of lags.\n\n Returns:\n Lag index and corresponding correlation of the peak correlation.\n\n Raises:\n ValueError: If inputs don't have same length.\n \"\"\"\n if len(feature) != len(target):\n raise ValueError(\"feature and target need to have the same length.\")\n maxlags = jnp.minimum(len(feature) - 1, maxlags)\n mean_feature, mean_target = feature.mean(), target.mean()\n plot = plt.xcorr(\n x=feature - mean_feature, y=target - mean_target, maxlags=maxlags)\n plt.show()\n maxidx = plot[1][plot[0] <= 0].argmax()\n return plot[0][maxidx], plot[1][maxidx]\n\n\ndef plot_var_cost(media: jnp.ndarray, costs: jnp.ndarray,\n names: List[str]) -> matplotlib.figure.Figure:\n \"\"\"Plots a a chart between the coefficient of variation and cost.\n\n Args:\n media: Media matrix.\n costs: Cost vector.\n names: List of variable names.\n\n Returns:\n Plot of coefficient of variation and cost.\n\n Raises:\n ValueError if inputs don't conform to same length.\n \"\"\"\n if media.shape[1] != len(costs):\n raise ValueError(\"media columns and costs needs to have same length.\")\n if media.shape[1] != len(names):\n raise ValueError(\"media columns and names needs to have same length.\")\n coef_of_variation = media.std(axis=0) / media.mean(axis=0)\n\n fig, ax = plt.subplots(1, 1)\n ax.scatter(x=costs, y=coef_of_variation)\n # https://queirozf.com/entries/add-labels-and-text-to-matplotlib-plots-annotation-examples.\n for i in range(len(costs)):\n x, y, label = costs[i], coef_of_variation[i], names[i]\n ax.annotate(text=label, xy=(x, y))\n ax.set_xlabel(\"Cost\")\n ax.set_ylabel(\"Coef of Variation\")\n plt.close()\n return fig\n\n\ndef _create_shaded_line_plot(predictions: jnp.ndarray,\n target: jnp.ndarray,\n axis: matplotlib.axes.Axes,\n title_prefix: str = \"\",\n interval_mid_range: float = .9,\n digits: int = 3) -> None:\n \"\"\"Creates a plot of ground truth, predicted value and credibility interval.\n\n Args:\n predictions: 2d array of predicted values.\n target: Array of true values. 
Must be same length as predictions.\n axis: Matplotlib axis in which to plot the data.\n title_prefix: Prefix to add as the label of the plot.\n interval_mid_range: Mid range interval to take for plotting. Eg. .9 will use\n .05 and .95 as the lower and upper quantiles. Must be a float number\n between 0 and 1.\n digits: Number of decimals to display on metrics in the plot.\n \"\"\"\n if predictions.shape[1] != len(target):\n raise ValueError(\n \"Predicted data and ground-truth data must have same length.\")\n upper_quantile = 1 - (1 - interval_mid_range) / 2\n lower_quantile = (1 - interval_mid_range) / 2\n upper_bound = jnp.quantile(a=predictions, q=upper_quantile, axis=0)\n lower_bound = jnp.quantile(a=predictions, q=lower_quantile, axis=0)\n\n r2, _ = arviz.r2_score(y_true=target, y_pred=predictions)\n mape = 100 * metrics.mean_absolute_percentage_error(\n y_true=target, y_pred=predictions.mean(axis=0))\n axis.plot(jnp.arange(target.shape[0]), target, c=\"grey\", alpha=.9)\n axis.plot(\n jnp.arange(target.shape[0]),\n predictions.mean(axis=0),\n c=\"green\",\n alpha=.9)\n axis.fill_between(\n x=jnp.arange(target.shape[0]),\n y1=lower_bound,\n y2=upper_bound,\n alpha=.35,\n color=\"green\")\n axis.legend([\"True KPI\", \"Predicted KPI\"])\n axis.yaxis.grid(color=\"gray\", linestyle=\"dashed\", alpha=0.3)\n axis.xaxis.grid(color=\"gray\", linestyle=\"dashed\", alpha=0.3)\n title = \" \".join([\n title_prefix,\n \"True and predicted KPI.\",\n \"R2 = {r2:.{digits}f}\".format(r2=r2, digits=digits),\n \"MAPE = {mape:.{digits}f}%\".format(mape=mape, digits=digits)\n ])\n axis.title.set_text(title)\n plt.close()\n\n\ndef _call_fit_plotter(\n predictions: jnp.array,\n target: jnp.array,\n interval_mid_range: float,\n digits: int) -> matplotlib.figure.Figure:\n \"\"\"Calls the shaded line plot once for national and N times for geo models.\n\n Args:\n predictions: 2d array of predicted values.\n target: Array of true values. Must be same length as prediction.\n interval_mid_range: Mid range interval to take for plotting. Eg. .9 will use\n .05 and .95 as the lower and upper quantiles. Must be a float number\n between 0 and 1.\n digits: Number of decimals to display on metrics in the plot.\n\n Returns:\n Figure of the plot.\n \"\"\"\n # TODO(): Allow to pass geo names for fit plots\n if predictions.ndim == 3: # Multiple plots for geo model\n figure, axes = plt.subplots(predictions.shape[-1],\n figsize=(10, 5 * predictions.shape[-1]))\n for i, ax in enumerate(axes):\n _create_shaded_line_plot(predictions=predictions[..., i],\n target=target[..., i],\n axis=ax,\n title_prefix=f\"Geo {i}:\",\n interval_mid_range=interval_mid_range,\n digits=digits)\n else: # Single plot for national model\n figure, ax = plt.subplots(1, 1)\n _create_shaded_line_plot(predictions=predictions,\n target=target,\n axis=ax,\n interval_mid_range=interval_mid_range,\n digits=digits)\n return figure\n\n\ndef plot_model_fit(media_mix_model: lightweight_mmm.LightweightMMM,\n target_scaler: Optional[preprocessing.CustomScaler] = None,\n interval_mid_range: float = .9,\n digits: int = 3) -> matplotlib.figure.Figure:\n \"\"\"Plots the ground truth, predicted value and interval for the training data.\n\n Model needs to be fit before calling this function to plot.\n\n Args:\n media_mix_model: Media mix model.\n target_scaler: Scaler used for scaling the target, to unscaled values and\n plot in the original scale.\n interval_mid_range: Mid range interval to take for plotting. Eg. 
.9 will use\n .05 and .95 as the lower and upper quantiles. Must be a float number.\n between 0 and 1.\n digits: Number of decimals to display on metrics in the plot.\n\n Returns:\n Plot of model fit.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/6", "ground_truth": " if not hasattr(media_mix_model, \"trace\"):\n raise lightweight_mmm.NotFittedModelError(\n \"Model needs to be fit first before attempting to plot its fit.\")\n target_train = media_mix_model._target\n posterior_pred = media_mix_model.trace[\"mu\"]\n if target_scaler:\n posterior_pred = target_scaler.inverse_transform(posterior_pred)\n target_train = target_scaler.inverse_transform(target_train)\n\n return _call_fit_plotter(\n predictions=posterior_pred,\n target=target_train,\n interval_mid_range=interval_mid_range,\n digits=digits)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "plot.py"], "context_start_lineno": 404, "lineno": 728, "function_name": "plot_model_fit"}, "groundtruth": " if not hasattr(media_mix_model, \"trace\"):\n raise lightweight_mmm.NotFittedModelError(\n \"Model needs to be fit first before attempting to plot its fit.\")\n target_train = media_mix_model._target\n posterior_pred = media_mix_model.trace[\"mu\"]\n if target_scaler:\n posterior_pred = target_scaler.inverse_transform(posterior_pred)\n target_train = target_scaler.inverse_transform(target_train)\n\n return _call_fit_plotter(\n predictions=posterior_pred,\n target=target_train,\n interval_mid_range=interval_mid_range,\n digits=digits)\n"} +{"prompt": "map(\n jax.vmap(_make_single_prediction,\n in_axes=(None, 0, None, None),\n out_axes=0),\n in_axes=(None, 0, None, None), out_axes=1)\n diagonal = jnp.repeat(\n jnp.eye(media_mix_model.n_media_channels), steps,\n axis=0).reshape(media_mix_model.n_media_channels, steps,\n media_mix_model.n_media_channels)\n\n prediction_offset = media_mix_model.predict(\n media=jnp.zeros((1, *media.shape[1:])),\n extra_features=extra_features).mean(axis=0)\n\n if media.ndim == 3:\n diagonal = jnp.expand_dims(diagonal, axis=-1)\n prediction_offset = jnp.expand_dims(prediction_offset, axis=0)\n mock_media = media_ranges * diagonal\n predictions = jnp.squeeze(a=make_predictions(media_mix_model,\n mock_media,\n extra_features,\n seed))\n predictions = predictions - prediction_offset\n media_ranges = jnp.squeeze(media_ranges)\n if target_scaler:\n predictions = target_scaler.inverse_transform(predictions)\n\n if media_scaler:\n media_ranges = media_scaler.inverse_transform(media_ranges)\n\n if prices is not None:\n if media.ndim == 3:\n prices = jnp.expand_dims(prices, axis=-1)\n media_ranges *= prices\n\n if predictions.ndim == 3:\n media_ranges = jnp.sum(media_ranges, axis=-1)\n predictions = jnp.sum(predictions, axis=-1)\n\n if optimal_allocation_per_timeunit is not None:\n average_allocation = media_mix_model.media.mean(axis=0)\n average_allocation_predictions = _generate_diagonal_predictions(\n media_mix_model=media_mix_model,\n media_values=average_allocation,\n extra_features=extra_features,\n target_scaler=target_scaler,\n prediction_offset=prediction_offset,\n seed=seed)\n optimal_allocation_predictions = _generate_diagonal_predictions(\n media_mix_model=media_mix_model,\n media_values=optimal_allocation_per_timeunit,\n extra_features=extra_features,\n target_scaler=target_scaler,\n prediction_offset=prediction_offset,\n seed=seed)\n if media_scaler:\n average_allocation = media_scaler.inverse_transform(average_allocation)\n optimal_allocation_per_timeunit = 
media_scaler.inverse_transform(\n optimal_allocation_per_timeunit)\n if prices is not None:\n optimal_allocation_per_timeunit *= prices\n average_allocation *= prices\n if media.ndim == 3:\n average_allocation = jnp.sum(average_allocation, axis=-1)\n optimal_allocation_per_timeunit = jnp.sum(\n optimal_allocation_per_timeunit, axis=-1)\n\n kpi_label = \"KPI\" if target_scaler else \"Normalized KPI\"\n fig = plt.figure(media_mix_model.n_media_channels + 1,\n figsize=figure_size,\n tight_layout=True)\n n_rows = _calculate_number_rows_plot(\n n_media_channels=media_mix_model.n_media_channels, n_columns=n_columns)\n last_ax = fig.add_subplot(n_rows, 1, n_rows)\n for i in range(media_mix_model.n_media_channels):\n ax = fig.add_subplot(n_rows, n_columns, i + 1)\n sns.lineplot(\n x=media_ranges[:, i],\n y=predictions[:, i],\n label=media_mix_model.media_names[i],\n color=_PALETTE[i],\n ax=ax)\n sns.lineplot(\n x=media_ranges[:, i],\n y=jnp.log(predictions[:, i]) if apply_log_scale else predictions[:, i],\n label=media_mix_model.media_names[i],\n color=_PALETTE[i],\n ax=last_ax)\n if optimal_allocation_per_timeunit is not None:\n ax.plot(\n average_allocation[i],\n average_allocation_predictions[i],\n marker=\"o\",\n markersize=marker_size,\n label=\"avg_spend\",\n color=_PALETTE[i])\n ax.plot(\n optimal_allocation_per_timeunit[i],\n optimal_allocation_predictions[i],\n marker=\"x\",\n markersize=marker_size + 2,\n label=\"optimal_spend\",\n color=_PALETTE[i])\n ax.set_ylabel(kpi_label)\n ax.set_xlabel(\"Normalized Spend\" if not media_scaler else \"Spend\")\n ax.legend(fontsize=legend_fontsize)\n\n fig.suptitle(\"Response curves\", fontsize=20)\n last_ax.set_ylabel(kpi_label if not apply_log_scale else f\"log({kpi_label})\")\n last_ax.set_xlabel(\"Normalized spend per channel\"\n if not media_scaler else \"Spend per channel\")\n plt.close()\n return fig\n\n\ndef plot_cross_correlate(feature: jnp.ndarray,\n target: jnp.ndarray,\n maxlags: int = 10) -> Tuple[int, float]:\n \"\"\"Plots the cross correlation coefficients between 2 vectors.\n\n In the chart look for positive peaks, this shows how the lags of the feature\n lead the target.\n\n Args:\n feature: Vector, the lags of which predict target.\n target: Vector, what is predicted.\n maxlags: Maximum number of lags.\n\n Returns:\n Lag index and corresponding correlation of the peak correlation.\n\n Raises:\n ValueError: If inputs don't have same length.\n \"\"\"\n if len(feature) != len(target):\n raise ValueError(\"feature and target need to have the same length.\")\n maxlags = jnp.minimum(len(feature) - 1, maxlags)\n mean_feature, mean_target = feature.mean(), target.mean()\n plot = plt.xcorr(\n x=feature - mean_feature, y=target - mean_target, maxlags=maxlags)\n plt.show()\n maxidx = plot[1][plot[0] <= 0].argmax()\n return plot[0][maxidx], plot[1][maxidx]\n\n\ndef plot_var_cost(media: jnp.ndarray, costs: jnp.ndarray,\n names: List[str]) -> matplotlib.figure.Figure:\n \"\"\"Plots a a chart between the coefficient of variation and cost.\n\n Args:\n media: Media matrix.\n costs: Cost vector.\n names: List of variable names.\n\n Returns:\n Plot of coefficient of variation and cost.\n\n Raises:\n ValueError if inputs don't conform to same length.\n \"\"\"\n if media.shape[1] != len(costs):\n raise ValueError(\"media columns and costs needs to have same length.\")\n if media.shape[1] != len(names):\n raise ValueError(\"media columns and names needs to have same length.\")\n coef_of_variation = media.std(axis=0) / media.mean(axis=0)\n\n fig, ax = 
plt.subplots(1, 1)\n ax.scatter(x=costs, y=coef_of_variation)\n # https://queirozf.com/entries/add-labels-and-text-to-matplotlib-plots-annotation-examples.\n for i in range(len(costs)):\n x, y, label = costs[i], coef_of_variation[i], names[i]\n ax.annotate(text=label, xy=(x, y))\n ax.set_xlabel(\"Cost\")\n ax.set_ylabel(\"Coef of Variation\")\n plt.close()\n return fig\n\n\ndef _create_shaded_line_plot(predictions: jnp.ndarray,\n target: jnp.ndarray,\n axis: matplotlib.axes.Axes,\n title_prefix: str = \"\",\n interval_mid_range: float = .9,\n digits: int = 3) -> None:\n \"\"\"Creates a plot of ground truth, predicted value and credibility interval.\n\n Args:\n predictions: 2d array of predicted values.\n target: Array of true values. Must be same length as predictions.\n axis: Matplotlib axis in which to plot the data.\n title_prefix: Prefix to add as the label of the plot.\n interval_mid_range: Mid range interval to take for plotting. Eg. .9 will use\n .05 and .95 as the lower and upper quantiles. Must be a float number\n between 0 and 1.\n digits: Number of decimals to display on metrics in the plot.\n \"\"\"\n if predictions.shape[1] != len(target):\n raise ValueError(\n \"Predicted data and ground-truth data must have same length.\")\n upper_quantile = 1 - (1 - interval_mid_range) / 2\n lower_quantile = (1 - interval_mid_range) / 2\n upper_bound = jnp.quantile(a=predictions, q=upper_quantile, axis=0)\n lower_bound = jnp.quantile(a=predictions, q=lower_quantile, axis=0)\n\n r2, _ = arviz.r2_score(y_true=target, y_pred=predictions)\n mape = 100 * metrics.mean_absolute_percentage_error(\n y_true=target, y_pred=predictions.mean(axis=0))\n axis.plot(jnp.arange(target.shape[0]), target, c=\"grey\", alpha=.9)\n axis.plot(\n jnp.arange(target.shape[0]),\n predictions.mean(axis=0),\n c=\"green\",\n alpha=.9)\n axis.fill_between(\n x=jnp.arange(target.shape[0]),\n y1=lower_bound,\n y2=upper_bound,\n alpha=.35,\n color=\"green\")\n axis.legend([\"True KPI\", \"Predicted KPI\"])\n axis.yaxis.grid(color=\"gray\", linestyle=\"dashed\", alpha=0.3)\n axis.xaxis.grid(color=\"gray\", linestyle=\"dashed\", alpha=0.3)\n title = \" \".join([\n title_prefix,\n \"True and predicted KPI.\",\n \"R2 = {r2:.{digits}f}\".format(r2=r2, digits=digits),\n \"MAPE = {mape:.{digits}f}%\".format(mape=mape, digits=digits)\n ])\n axis.title.set_text(title)\n plt.close()\n\n\ndef _call_fit_plotter(\n predictions: jnp.array,\n target: jnp.array,\n interval_mid_range: float,\n digits: int) -> matplotlib.figure.Figure:\n \"\"\"Calls the shaded line plot once for national and N times for geo models.\n\n Args:\n predictions: 2d array of predicted values.\n target: Array of true values. Must be same length as prediction.\n interval_mid_range: Mid range interval to take for plotting. Eg. .9 will use\n .05 and .95 as the lower and upper quantiles. 
Must be a float number\n between 0 and 1.\n digits: Number of decimals to display on metrics in the plot.\n\n Returns:\n Figure of the plot.\n \"\"\"\n # TODO(): Allow to pass geo names for fit plots\n if predictions.ndim == 3: # Multiple plots for geo model\n figure, axes = plt.subplots(predictions.shape[-1],\n figsize=(10, 5 * predictions.shape[-1]))\n for i, ax in enumerate(axes):\n _create_shaded_line_plot(predictions=predictions[..., i],\n target=target[..., i],\n axis=ax,\n title_prefix=f\"Geo {i}:\",\n interval_mid_range=interval_mid_range,\n digits=digits)\n else: # Single plot for national model\n figure, ax = plt.subplots(1, 1)\n _create_shaded_line_plot(predictions=predictions,\n target=target,\n axis=ax,\n interval_mid_range=interval_mid_range,\n digits=digits)\n return figure\n\n\ndef plot_model_fit(media_mix_model: lightweight_mmm.LightweightMMM,\n target_scaler: Optional[preprocessing.CustomScaler] = None,\n interval_mid_range: float = .9,\n digits: int = 3) -> matplotlib.figure.Figure:\n \"\"\"Plots the ground truth, predicted value and interval for the training data.\n\n Model needs to be fit before calling this function to plot.\n\n Args:\n media_mix_model: Media mix model.\n target_scaler: Scaler used for scaling the target, to unscaled values and\n plot in the original scale.\n interval_mid_range: Mid range interval to take for plotting. Eg. .9 will use\n .05 and .95 as the lower and upper quantiles. Must be a float number.\n between 0 and 1.\n digits: Number of decimals to display on metrics in the plot.\n\n Returns:\n Plot of model fit.\n \"\"\"\n if not hasattr(media_mix_model, \"trace\"):\n raise lightweight_mmm.NotFittedModelError(\n \"Model needs to be fit first before attempting to plot its fit.\")\n target_train = media_mix_model._target\n posterior_pred = media_mix_model.trace[\"mu\"]\n if target_scaler:\n posterior_pred = target_scaler.inverse_transform(posterior_pred)\n target_train = target_scaler.inverse_transform(target_train)\n\n return _call_fit_plotter(\n predictions=posterior_pred,\n target=target_train,\n interval_mid_range=interval_mid_range,\n digits=digits)\n\n\ndef plot_out_of_sample_model_fit(out_of_sample_predictions: jnp.ndarray,\n out_of_sample_target: jnp.ndarray,\n interval_mid_range: float = .9,\n digits: int = 3) -> matplotlib.figure.Figure:\n \"\"\"Plots the ground truth, predicted value and interval for the test data.\n\n Args:\n out_of_sample_predictions: Predictions for the out-of-sample period, as\n derived from mmm.predict.\n out_of_sample_target: Target for the out-of-sample period. Needs to be on\n the same scale as out_of_sample_predictions.\n interval_mid_range: Mid range interval to take for plotting. Eg. .9 will use\n .05 and .95 as the lower and upper quantiles. 
Must be a float number.\n between 0 and 1.\n digits: Number of decimals to display on metrics in the plot.\n\n Returns:\n Plot of model fit.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/7", "ground_truth": " return _call_fit_plotter(\n predictions=out_of_sample_predictions,\n target=out_of_sample_target,\n interval_mid_range=interval_mid_range,\n digits=digits)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "plot.py"], "context_start_lineno": 437, "lineno": 763, "function_name": "plot_out_of_sample_model_fit"}, "groundtruth": " return _call_fit_plotter(\n predictions=out_of_sample_predictions,\n target=out_of_sample_target,\n interval_mid_range=interval_mid_range,\n digits=digits)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for seasonality.\"\"\"\n\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import distributions as dist\nfrom numpyro import handlers\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom lightweight_mmm.core import priors\nfrom lightweight_mmm.core.time import seasonality\n\n\nclass SeasonalityTest(parameterized.TestCase):\n\n @parameterized.named_parameters([\n dict(\n testcase_name=\"2_degrees\",\n seasonality_arange_value=150,\n degrees_arange_shape=5,\n gamma_seasonality_shape=(5, 2),\n ),\n dict(\n testcase_name=\"10_degree\",\n seasonality_arange_value=150,\n degrees_arange_shape=10,\n gamma_seasonality_shape=(10, 2),\n ),\n dict(\n testcase_name=\"1_degree\",\n seasonality_arange_value=200,\n degrees_arange_shape=1,\n gamma_seasonality_shape=(1, 2),\n ),\n ])\n def test_core_sinusoidal_seasonality_produces_correct_shape(\n self, seasonality_arange_value, degrees_arange_shape,\n gamma_seasonality_shape):\n seasonality_arange = jnp.expand_dims(\n jnp.arange(seasonality_arange_value), axis=-1)\n degrees_arange = jnp.arange(degrees_arange_shape)\n gamma_seasonality = jnp.ones(gamma_seasonality_shape)\n\n seasonality_values = seasonality._sinusoidal_seasonality(\n seasonality_arange=seasonality_arange,\n degrees_arange=degrees_arange,\n gamma_seasonality=gamma_seasonality,\n frequency=52,\n )\n self.assertEqual(seasonality_values.shape, (seasonality_arange_value,))\n\n @parameterized.named_parameters(\n dict(\n testcase_name=\"ten_degrees_national\",\n data_shape=(500, 5),\n degrees_seasonality=10,\n expected_shape=(10, 500),\n ),\n dict(\n testcase_name=\"ten_degrees_geo\",\n data_shape=(500, 5, 5),\n degrees_seasonality=10,\n expected_shape=(10, 500, 1),\n ),\n dict(\n testcase_name=\"one_degrees_national\",\n data_shape=(500, 5),\n degrees_seasonality=1,\n expected_shape=(10, 500),\n ),\n dict(\n testcase_name=\"one_degrees_geo\",\n data_shape=(500, 5, 5),\n degrees_seasonality=1,\n expected_shape=(10, 500, 1),\n ),\n )\n def test_model_sinusoidal_seasonality_produces_correct_shape(\n self, data_shape, degrees_seasonality, expected_shape):\n\n def mock_model_function(data, degrees_seasonality, frequency):", "metadata": 
{"task_id": "google--lightweight_mmm/8", "ground_truth": " numpyro.deterministic(\n \"seasonality\",\n seasonality.sinusoidal_seasonality(\n data=data,\n degrees_seasonality=degrees_seasonality,\n custom_priors={},\n frequency=frequency))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "time", "seasonality_test.py"], "context_start_lineno": 0, "lineno": 96, "function_name": "mock_model_function"}, "groundtruth": " numpyro.deterministic(\n \"seasonality\",\n seasonality.sinusoidal_seasonality(\n data=data,\n degrees_seasonality=degrees_seasonality,\n custom_priors={},\n frequency=frequency))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for seasonality.\"\"\"\n\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import distributions as dist\nfrom numpyro import handlers\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nfrom lightweight_mmm.core import priors\nfrom lightweight_mmm.core.time import seasonality\n\n\nclass SeasonalityTest(parameterized.TestCase):\n\n @parameterized.named_parameters([\n dict(\n testcase_name=\"2_degrees\",\n seasonality_arange_value=150,\n degrees_arange_shape=5,\n gamma_seasonality_shape=(5, 2),\n ),\n dict(\n testcase_name=\"10_degree\",\n seasonality_arange_value=150,\n degrees_arange_shape=10,\n gamma_seasonality_shape=(10, 2),\n ),\n dict(\n testcase_name=\"1_degree\",\n seasonality_arange_value=200,\n degrees_arange_shape=1,\n gamma_seasonality_shape=(1, 2),\n ),\n ])\n def test_core_sinusoidal_seasonality_produces_correct_shape(\n self, seasonality_arange_value, degrees_arange_shape,\n gamma_seasonality_shape):\n seasonality_arange = jnp.expand_dims(\n jnp.arange(seasonality_arange_value), axis=-1)\n degrees_arange = jnp.arange(degrees_arange_shape)\n gamma_seasonality = jnp.ones(gamma_seasonality_shape)\n\n seasonality_values = seasonality._sinusoidal_seasonality(\n seasonality_arange=seasonality_arange,\n degrees_arange=degrees_arange,\n gamma_seasonality=gamma_seasonality,\n frequency=52,\n )\n self.assertEqual(seasonality_values.shape, (seasonality_arange_value,))\n\n @parameterized.named_parameters(\n dict(\n testcase_name=\"ten_degrees_national\",\n data_shape=(500, 5),\n degrees_seasonality=10,\n expected_shape=(10, 500),\n ),\n dict(\n testcase_name=\"ten_degrees_geo\",\n data_shape=(500, 5, 5),\n degrees_seasonality=10,\n expected_shape=(10, 500, 1),\n ),\n dict(\n testcase_name=\"one_degrees_national\",\n data_shape=(500, 5),\n degrees_seasonality=1,\n expected_shape=(10, 500),\n ),\n dict(\n testcase_name=\"one_degrees_geo\",\n data_shape=(500, 5, 5),\n degrees_seasonality=1,\n expected_shape=(10, 500, 1),\n ),\n )\n def test_model_sinusoidal_seasonality_produces_correct_shape(\n self, data_shape, degrees_seasonality, expected_shape):\n\n def mock_model_function(data, degrees_seasonality, frequency):\n numpyro.deterministic(\n \"seasonality\",\n seasonality.sinusoidal_seasonality(\n data=data,\n 
degrees_seasonality=degrees_seasonality,\n custom_priors={},\n frequency=frequency))\n\n num_samples = 10\n data = jnp.ones(data_shape)\n kernel = numpyro.infer.NUTS(model=mock_model_function)\n mcmc = numpyro.infer.MCMC(\n sampler=kernel, num_warmup=10, num_samples=num_samples, num_chains=1)\n rng_key = jax.random.PRNGKey(0)\n\n mcmc.run(\n rng_key,\n data=data,\n degrees_seasonality=degrees_seasonality,\n frequency=52,\n )\n seasonality_values = mcmc.get_samples()[\"seasonality\"]\n\n self.assertEqual(seasonality_values.shape, expected_shape)\n\n def test_sinusoidal_seasonality_custom_priors_are_taken_correctly(self):\n prior_name = priors.GAMMA_SEASONALITY\n expected_value1, expected_value2 = 5.2, 7.56\n custom_priors = {\n prior_name:\n dist.Kumaraswamy(\n concentration1=expected_value1, concentration0=expected_value2)\n }\n media = jnp.ones((10, 5, 5))\n degrees_seasonality = 3\n frequency = 365\n\n trace_handler = handlers.trace(\n handlers.seed(seasonality.sinusoidal_seasonality, rng_seed=0))\n trace = trace_handler.get_trace(\n data=media,\n custom_priors=custom_priors,\n degrees_seasonality=degrees_seasonality,\n frequency=frequency,\n )\n values_and_dists = {\n name: site[\"fn\"] for name, site in trace.items() if \"fn\" in site\n }\n\n used_distribution = values_and_dists[prior_name]\n if isinstance(used_distribution, dist.ExpandedDistribution):\n used_distribution = used_distribution.base_dist\n self.assertIsInstance(used_distribution, dist.Kumaraswamy)\n self.assertEqual(used_distribution.concentration0, expected_value2)\n self.assertEqual(used_distribution.concentration1, expected_value1)\n\n @parameterized.named_parameters(\n dict(\n testcase_name=\"ten_degrees\",\n data_shape=(500, 3),\n expected_shape=(10, 500),\n ),\n dict(\n testcase_name=\"five_degrees\",\n data_shape=(500, 3, 5),\n expected_shape=(10, 500, 1),\n ),\n )\n def test_intra_week_seasonality_produces_correct_shape(\n self, data_shape, expected_shape):\n\n def mock_model_function(data):", "metadata": {"task_id": "google--lightweight_mmm/9", "ground_truth": " numpyro.deterministic(\n \"intra_week\",\n seasonality.intra_week_seasonality(\n data=data,\n custom_priors={},\n ))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "time", "seasonality_test.py"], "context_start_lineno": 0, "lineno": 168, "function_name": "mock_model_function"}, "groundtruth": " numpyro.deterministic(\n \"intra_week\",\n seasonality.intra_week_seasonality(\n data=data,\n custom_priors={},\n ))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for trend.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport numpyro\nfrom numpyro import distributions as dist\nfrom numpyro import handlers\n\nfrom lightweight_mmm.core import core_utils\nfrom lightweight_mmm.core import priors\nfrom lightweight_mmm.core.time import trend\n\n\nclass 
TrendTest(parameterized.TestCase):\n\n @parameterized.named_parameters([\n dict(\n testcase_name=\"national\",\n coef_trend_shape=(),\n trend_length=150,\n expo_trend_shape=(),\n ),\n dict(\n testcase_name=\"geo\",\n coef_trend_shape=(5,),\n trend_length=150,\n expo_trend_shape=(),\n ),\n ])\n def test_core_trend_with_exponent_produces_correct_shape(\n self, coef_trend_shape, trend_length, expo_trend_shape):\n coef_trend = jnp.ones(coef_trend_shape)\n linear_trend = jnp.arange(trend_length)\n if coef_trend.ndim == 1: # For geo model's case\n linear_trend = jnp.expand_dims(linear_trend, axis=-1)\n expo_trend = jnp.ones(expo_trend_shape)\n\n trend_values = trend._trend_with_exponent(\n coef_trend=coef_trend, trend=linear_trend, expo_trend=expo_trend)\n\n self.assertEqual(trend_values.shape,\n (linear_trend.shape[0], *coef_trend_shape))\n\n @parameterized.named_parameters([\n dict(testcase_name=\"national\", data_shape=(150, 3)),\n dict(testcase_name=\"geo\", data_shape=(150, 3, 5)),\n ])\n def test_trend_with_exponent_produces_correct_shape(self, data_shape):\n\n def mock_model_function(data):", "metadata": {"task_id": "google--lightweight_mmm/10", "ground_truth": " numpyro.deterministic(\n \"trend\", trend.trend_with_exponent(\n data=data,\n custom_priors={},\n ))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "time", "trend_test.py"], "context_start_lineno": 0, "lineno": 67, "function_name": "mock_model_function"}, "groundtruth": " numpyro.deterministic(\n \"trend\", trend.trend_with_exponent(\n data=data,\n custom_priors={},\n ))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for trend.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport numpyro\nfrom numpyro import distributions as dist\nfrom numpyro import handlers\n\nfrom lightweight_mmm.core import core_utils\nfrom lightweight_mmm.core import priors\nfrom lightweight_mmm.core.time import trend\n\n\nclass TrendTest(parameterized.TestCase):\n\n @parameterized.named_parameters([\n dict(\n testcase_name=\"national\",\n coef_trend_shape=(),\n trend_length=150,\n expo_trend_shape=(),\n ),\n dict(\n testcase_name=\"geo\",\n coef_trend_shape=(5,),\n trend_length=150,\n expo_trend_shape=(),\n ),\n ])\n def test_core_trend_with_exponent_produces_correct_shape(\n self, coef_trend_shape, trend_length, expo_trend_shape):\n coef_trend = jnp.ones(coef_trend_shape)\n linear_trend = jnp.arange(trend_length)\n if coef_trend.ndim == 1: # For geo model's case\n linear_trend = jnp.expand_dims(linear_trend, axis=-1)\n expo_trend = jnp.ones(expo_trend_shape)\n\n trend_values = trend._trend_with_exponent(\n coef_trend=coef_trend, trend=linear_trend, expo_trend=expo_trend)\n\n self.assertEqual(trend_values.shape,\n (linear_trend.shape[0], *coef_trend_shape))\n\n @parameterized.named_parameters([\n dict(testcase_name=\"national\", data_shape=(150, 3)),\n 
dict(testcase_name=\"geo\", data_shape=(150, 3, 5)),\n ])\n def test_trend_with_exponent_produces_correct_shape(self, data_shape):\n\n def mock_model_function(data):\n numpyro.deterministic(\n \"trend\", trend.trend_with_exponent(\n data=data,\n custom_priors={},\n ))\n\n num_samples = 10\n data = jnp.ones(data_shape)\n kernel = numpyro.infer.NUTS(model=mock_model_function)\n mcmc = numpyro.infer.MCMC(\n sampler=kernel, num_warmup=10, num_samples=num_samples, num_chains=1)\n rng_key = jax.random.PRNGKey(0)\n coef_expected_shape = () if data.ndim == 2 else (data.shape[2],)\n\n mcmc.run(rng_key, data=data)\n trend_values = mcmc.get_samples()[\"trend\"]\n\n self.assertEqual(trend_values.shape,\n (num_samples, data.shape[0], *coef_expected_shape))\n\n @parameterized.named_parameters(\n dict(\n testcase_name=f\"model_{priors.COEF_TREND}\",\n prior_name=priors.COEF_TREND,\n ),\n dict(\n testcase_name=f\"model_{priors.EXPO_TREND}\",\n prior_name=priors.EXPO_TREND,\n ),\n )\n def test_trend_with_exponent_custom_priors_are_taken_correctly(\n self, prior_name):\n expected_value1, expected_value2 = 5.2, 7.56\n custom_priors = {\n prior_name:\n dist.Kumaraswamy(\n concentration1=expected_value1, concentration0=expected_value2)\n }\n media = jnp.ones((10, 5, 5))\n\n trace_handler = handlers.trace(\n handlers.seed(trend.trend_with_exponent, rng_seed=0))\n trace = trace_handler.get_trace(\n data=media,\n custom_priors=custom_priors,\n )\n values_and_dists = {\n name: site[\"fn\"] for name, site in trace.items() if \"fn\" in site\n }\n\n used_distribution = values_and_dists[prior_name]\n if isinstance(used_distribution, dist.ExpandedDistribution):\n used_distribution = used_distribution.base_dist\n self.assertIsInstance(used_distribution, dist.Kumaraswamy)\n self.assertEqual(used_distribution.concentration0, expected_value2)\n self.assertEqual(used_distribution.concentration1, expected_value1)\n\n @parameterized.named_parameters([\n dict(\n testcase_name=\"dynamic_trend_national_shape\",\n number_periods=100,\n initial_level_shape=(),\n initial_slope_shape=(),\n variance_level_shape=(),\n variance_slope_shape=(),\n ),\n dict(\n testcase_name=\"dynamic_trend_geo_shape\",\n number_periods=100,\n initial_level_shape=(2,),\n initial_slope_shape=(2,),\n variance_level_shape=(2,),\n variance_slope_shape=(2,),\n ),\n ])\n def test_core_dynamic_trend_produces_correct_shape(\n self, number_periods, initial_level_shape, initial_slope_shape,\n variance_level_shape, variance_slope_shape):\n initial_level = jnp.ones(initial_level_shape)\n initial_slope = jnp.ones(initial_slope_shape)\n variance_level = jnp.ones(variance_level_shape)\n variance_slope = jnp.ones(variance_slope_shape)\n random_walk_level = jnp.arange(number_periods)\n random_walk_slope = jnp.arange(number_periods)\n if initial_level.ndim == 1: # For geo model's case\n random_walk_level = jnp.expand_dims(random_walk_level, axis=-1)\n random_walk_slope = jnp.expand_dims(random_walk_slope, axis=-1)\n\n dynamic_trend_values = trend._dynamic_trend(\n number_periods=number_periods,\n random_walk_level=random_walk_level,\n random_walk_slope=random_walk_slope,\n initial_level=initial_level,\n initial_slope=initial_slope,\n variance_level=variance_level,\n variance_slope=variance_slope,\n )\n\n self.assertEqual(dynamic_trend_values.shape,\n (number_periods, *initial_level_shape))\n\n def test_core_dynamic_trend_produces_correct_value(self):\n number_periods = 5\n initial_level = jnp.ones(())\n initial_slope = jnp.ones(())\n variance_level = jnp.ones(())\n 
variance_slope = jnp.ones(())\n random_walk_level = jnp.arange(number_periods)\n random_walk_slope = jnp.arange(number_periods)\n dynamic_trend_expected_value = jnp.array([1, 3, 7, 14, 25])\n\n dynamic_trend_values = trend._dynamic_trend(\n number_periods=number_periods,\n random_walk_level=random_walk_level,\n random_walk_slope=random_walk_slope,\n initial_level=initial_level,\n initial_slope=initial_slope,\n variance_level=variance_level,\n variance_slope=variance_slope,\n )\n\n np.testing.assert_array_equal(x=dynamic_trend_values,\n y=dynamic_trend_expected_value)\n\n @parameterized.named_parameters([\n dict(\n testcase_name=\"national_with_prediction_is_true\",\n data_shape=(100, 3),\n is_trend_prediction=True),\n dict(\n testcase_name=\"geo_with_prediction_is_true\",\n data_shape=(150, 3, 5),\n is_trend_prediction=True),\n dict(\n testcase_name=\"national_with_prediction_is_false\",\n data_shape=(100, 3),\n is_trend_prediction=False),\n dict(\n testcase_name=\"geo_with_prediction_is_false\",\n data_shape=(150, 3, 5),\n is_trend_prediction=False),\n ])\n def test_dynamic_trend_produces_correct_shape(\n self, data_shape, is_trend_prediction):\n\n def mock_model_function(geo_size, data_size):", "metadata": {"task_id": "google--lightweight_mmm/11", "ground_truth": " numpyro.deterministic(\n \"trend\", trend.dynamic_trend(\n geo_size=geo_size,\n data_size=data_size,\n is_trend_prediction=is_trend_prediction,\n custom_priors={},\n ))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "time", "trend_test.py"], "context_start_lineno": 0, "lineno": 213, "function_name": "mock_model_function"}, "groundtruth": " numpyro.deterministic(\n \"trend\", trend.dynamic_trend(\n geo_size=geo_size,\n data_size=data_size,\n is_trend_prediction=is_trend_prediction,\n custom_priors={},\n ))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Core and modelling functions for seasonality.\"\"\"\n\nfrom typing import Mapping\n\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import distributions as dist\n\nfrom lightweight_mmm.core import priors\nfrom lightweight_mmm.core import core_utils\n\n\n@jax.jit\ndef _sinusoidal_seasonality(\n seasonality_arange: jnp.ndarray,\n degrees_arange: jnp.ndarray,\n gamma_seasonality: jnp.ndarray,\n frequency: int,\n) -> jnp.ndarray:\n \"\"\"Core calculation of cyclic variation seasonality.\n\n Args:\n seasonality_arange: Array with range [0, N - 1] where N is the size of the\n data for which the seasonality is modelled.\n degrees_arange: Array with range [0, D - 1] where D is the number of degrees\n to use. Must be greater or equal than 1.\n gamma_seasonality: Factor to multiply to each degree calculation. 
Shape must\n be aligned with the number of degrees.\n frequency: Frecuency of the seasonality be in computed.\n\n Returns:\n An array with the seasonality values.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/12", "ground_truth": " inner_value = seasonality_arange * 2 * jnp.pi * degrees_arange / frequency\n season_matrix_sin = jnp.sin(inner_value)\n season_matrix_cos = jnp.cos(inner_value)\n season_matrix = jnp.concatenate([\n jnp.expand_dims(a=season_matrix_sin, axis=-1),\n jnp.expand_dims(a=season_matrix_cos, axis=-1)\n ],\n axis=-1)\n return jnp.einsum(\"tds, ds -> t\", season_matrix, gamma_seasonality)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "time", "seasonality.py"], "context_start_lineno": 0, "lineno": 48, "function_name": "_sinusoidal_seasonality"}, "groundtruth": " inner_value = seasonality_arange * 2 * jnp.pi * degrees_arange / frequency\n season_matrix_sin = jnp.sin(inner_value)\n season_matrix_cos = jnp.cos(inner_value)\n season_matrix = jnp.concatenate([\n jnp.expand_dims(a=season_matrix_sin, axis=-1),\n jnp.expand_dims(a=season_matrix_cos, axis=-1)\n ],\n axis=-1)\n return jnp.einsum(\"tds, ds -> t\", season_matrix, gamma_seasonality)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Core and modelling functions for seasonality.\"\"\"\n\nfrom typing import Mapping\n\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import distributions as dist\n\nfrom lightweight_mmm.core import priors\nfrom lightweight_mmm.core import core_utils\n\n\n@jax.jit\ndef _sinusoidal_seasonality(\n seasonality_arange: jnp.ndarray,\n degrees_arange: jnp.ndarray,\n gamma_seasonality: jnp.ndarray,\n frequency: int,\n) -> jnp.ndarray:\n \"\"\"Core calculation of cyclic variation seasonality.\n\n Args:\n seasonality_arange: Array with range [0, N - 1] where N is the size of the\n data for which the seasonality is modelled.\n degrees_arange: Array with range [0, D - 1] where D is the number of degrees\n to use. Must be greater or equal than 1.\n gamma_seasonality: Factor to multiply to each degree calculation. 
Shape must\n be aligned with the number of degrees.\n frequency: Frecuency of the seasonality be in computed.\n\n Returns:\n An array with the seasonality values.\n \"\"\"\n inner_value = seasonality_arange * 2 * jnp.pi * degrees_arange / frequency\n season_matrix_sin = jnp.sin(inner_value)\n season_matrix_cos = jnp.cos(inner_value)\n season_matrix = jnp.concatenate([\n jnp.expand_dims(a=season_matrix_sin, axis=-1),\n jnp.expand_dims(a=season_matrix_cos, axis=-1)\n ],\n axis=-1)\n return jnp.einsum(\"tds, ds -> t\", season_matrix, gamma_seasonality)\n\n\ndef sinusoidal_seasonality(\n data: jnp.ndarray,\n custom_priors: Mapping[str, dist.Distribution],\n *,\n degrees_seasonality: int = 2,\n frequency: int = 52,\n) -> jnp.ndarray:\n \"\"\"Calculates cyclic variation seasonality.\n\n For detailed info check:\n https://en.wikipedia.org/wiki/Seasonality#Modeling\n\n Args:\n data: Data for which the seasonality will be modelled for. It is used to\n obtain the length of the time dimension, axis 0.\n custom_priors: The custom priors we want the model to take instead of\n default ones.\n degrees_seasonality: Number of degrees to use. Must be greater or equal than\n 1.\n frequency: Frecuency of the seasonality be in computed. By default is 52 for\n weekly data (52 weeks in a year).\n\n Returns:\n An array with the seasonality values.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/13", "ground_truth": " number_periods = data.shape[0]\n default_priors = priors.get_default_priors()\n n_geos = core_utils.get_number_geos(data=data)\n with numpyro.plate(name=f\"{priors.GAMMA_SEASONALITY}_sin_cos_plate\", size=2):\n with numpyro.plate(\n name=f\"{priors.GAMMA_SEASONALITY}_plate\", size=degrees_seasonality):\n gamma_seasonality = numpyro.sample(\n name=priors.GAMMA_SEASONALITY,\n fn=custom_priors.get(priors.GAMMA_SEASONALITY,\n default_priors[priors.GAMMA_SEASONALITY]))\n seasonality_arange = jnp.expand_dims(a=jnp.arange(number_periods), axis=-1)\n degrees_arange = jnp.arange(degrees_seasonality)\n seasonality_values = _sinusoidal_seasonality(\n seasonality_arange=seasonality_arange,\n degrees_arange=degrees_arange,\n frequency=frequency,\n gamma_seasonality=gamma_seasonality,\n )\n if n_geos > 1:\n seasonality_values = jnp.expand_dims(seasonality_values, axis=-1)\n return seasonality_values\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "time", "seasonality.py"], "context_start_lineno": 0, "lineno": 84, "function_name": "sinusoidal_seasonality"}, "groundtruth": " number_periods = data.shape[0]\n default_priors = priors.get_default_priors()\n n_geos = core_utils.get_number_geos(data=data)\n with numpyro.plate(name=f\"{priors.GAMMA_SEASONALITY}_sin_cos_plate\", size=2):\n with numpyro.plate(\n name=f\"{priors.GAMMA_SEASONALITY}_plate\", size=degrees_seasonality):\n gamma_seasonality = numpyro.sample(\n name=priors.GAMMA_SEASONALITY,\n fn=custom_priors.get(priors.GAMMA_SEASONALITY,\n default_priors[priors.GAMMA_SEASONALITY]))\n seasonality_arange = jnp.expand_dims(a=jnp.arange(number_periods), axis=-1)\n degrees_arange = jnp.arange(degrees_seasonality)\n seasonality_values = _sinusoidal_seasonality(\n seasonality_arange=seasonality_arange,\n degrees_arange=degrees_arange,\n frequency=frequency,\n gamma_seasonality=gamma_seasonality,\n )\n if n_geos > 1:\n seasonality_values = jnp.expand_dims(seasonality_values, axis=-1)\n return seasonality_values\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you 
may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Core and modelling functions for seasonality.\"\"\"\n\nfrom typing import Mapping\n\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import distributions as dist\n\nfrom lightweight_mmm.core import priors\nfrom lightweight_mmm.core import core_utils\n\n\n@jax.jit\ndef _sinusoidal_seasonality(\n seasonality_arange: jnp.ndarray,\n degrees_arange: jnp.ndarray,\n gamma_seasonality: jnp.ndarray,\n frequency: int,\n) -> jnp.ndarray:\n \"\"\"Core calculation of cyclic variation seasonality.\n\n Args:\n seasonality_arange: Array with range [0, N - 1] where N is the size of the\n data for which the seasonality is modelled.\n degrees_arange: Array with range [0, D - 1] where D is the number of degrees\n to use. Must be greater or equal than 1.\n gamma_seasonality: Factor to multiply to each degree calculation. Shape must\n be aligned with the number of degrees.\n frequency: Frecuency of the seasonality be in computed.\n\n Returns:\n An array with the seasonality values.\n \"\"\"\n inner_value = seasonality_arange * 2 * jnp.pi * degrees_arange / frequency\n season_matrix_sin = jnp.sin(inner_value)\n season_matrix_cos = jnp.cos(inner_value)\n season_matrix = jnp.concatenate([\n jnp.expand_dims(a=season_matrix_sin, axis=-1),\n jnp.expand_dims(a=season_matrix_cos, axis=-1)\n ],\n axis=-1)\n return jnp.einsum(\"tds, ds -> t\", season_matrix, gamma_seasonality)\n\n\ndef sinusoidal_seasonality(\n data: jnp.ndarray,\n custom_priors: Mapping[str, dist.Distribution],\n *,\n degrees_seasonality: int = 2,\n frequency: int = 52,\n) -> jnp.ndarray:\n \"\"\"Calculates cyclic variation seasonality.\n\n For detailed info check:\n https://en.wikipedia.org/wiki/Seasonality#Modeling\n\n Args:\n data: Data for which the seasonality will be modelled for. It is used to\n obtain the length of the time dimension, axis 0.\n custom_priors: The custom priors we want the model to take instead of\n default ones.\n degrees_seasonality: Number of degrees to use. Must be greater or equal than\n 1.\n frequency: Frecuency of the seasonality be in computed. 
By default is 52 for\n weekly data (52 weeks in a year).\n\n Returns:\n An array with the seasonality values.\n \"\"\"\n number_periods = data.shape[0]\n default_priors = priors.get_default_priors()\n n_geos = core_utils.get_number_geos(data=data)\n with numpyro.plate(name=f\"{priors.GAMMA_SEASONALITY}_sin_cos_plate\", size=2):\n with numpyro.plate(\n name=f\"{priors.GAMMA_SEASONALITY}_plate\", size=degrees_seasonality):\n gamma_seasonality = numpyro.sample(\n name=priors.GAMMA_SEASONALITY,\n fn=custom_priors.get(priors.GAMMA_SEASONALITY,\n default_priors[priors.GAMMA_SEASONALITY]))\n seasonality_arange = jnp.expand_dims(a=jnp.arange(number_periods), axis=-1)\n degrees_arange = jnp.arange(degrees_seasonality)\n seasonality_values = _sinusoidal_seasonality(\n seasonality_arange=seasonality_arange,\n degrees_arange=degrees_arange,\n frequency=frequency,\n gamma_seasonality=gamma_seasonality,\n )\n if n_geos > 1:\n seasonality_values = jnp.expand_dims(seasonality_values, axis=-1)\n return seasonality_values\n\n\ndef _intra_week_seasonality(\n data: jnp.ndarray,\n weekday: jnp.ndarray,\n) -> jnp.ndarray:\n data_size = data.shape[0]\n return weekday[jnp.arange(data_size) % 7]\n\n\ndef intra_week_seasonality(\n data: jnp.ndarray,\n custom_priors: Mapping[str, dist.Distribution],\n) -> jnp.ndarray:\n \"\"\"Models intra week seasonality.\n\n Args:\n data: Data for which the seasonality will be modelled for. It is used to\n obtain the length of the time dimension, axis 0.\n custom_priors: The custom priors we want the model to take instead of\n default ones.\n\n Returns:\n The contribution of the weekday seasonality.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/14", "ground_truth": " default_priors = priors.get_default_priors()\n with numpyro.plate(name=f\"{priors.WEEKDAY}_plate\", size=7):\n weekday = numpyro.sample(\n name=priors.WEEKDAY,\n fn=custom_priors.get(priors.WEEKDAY, default_priors[priors.WEEKDAY]))\n\n weekday_series = _intra_week_seasonality(data=data, weekday=weekday)\n\n if data.ndim == 3: # For geo model's case\n weekday_series = jnp.expand_dims(weekday_series, axis=-1)\n\n return weekday_series\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "time", "seasonality.py"], "context_start_lineno": 0, "lineno": 130, "function_name": "intra_week_seasonality"}, "groundtruth": " default_priors = priors.get_default_priors()\n with numpyro.plate(name=f\"{priors.WEEKDAY}_plate\", size=7):\n weekday = numpyro.sample(\n name=priors.WEEKDAY,\n fn=custom_priors.get(priors.WEEKDAY, default_priors[priors.WEEKDAY]))\n\n weekday_series = _intra_week_seasonality(data=data, weekday=weekday)\n\n if data.ndim == 3: # For geo model's case\n weekday_series = jnp.expand_dims(weekday_series, axis=-1)\n\n return weekday_series\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Core and modelling functions for trend.\"\"\"\n\nimport functools\nfrom typing import Mapping\n\nimport jax\nimport jax.numpy as jnp\nimport 
numpyro\nfrom numpyro import distributions as dist\n\nfrom lightweight_mmm.core import core_utils\nfrom lightweight_mmm.core import priors\n\n\n@jax.jit\ndef _trend_with_exponent(coef_trend: jnp.ndarray, trend: jnp.ndarray,\n expo_trend: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Applies the coefficient and exponent to the trend to obtain trend values.\n\n Args:\n coef_trend: Coefficient to be multiplied by the trend.\n trend: Initial trend values.\n expo_trend: Exponent to be applied to the trend.\n\n Returns:\n The trend values generated.\n \"\"\"\n return coef_trend * trend**expo_trend\n\n\ndef trend_with_exponent(\n data: jnp.ndarray,\n custom_priors: Mapping[str, dist.Distribution],\n) -> jnp.ndarray:\n \"\"\"Trend with exponent for curvature.\n\n Args:\n data: Data for which trend will be created.\n custom_priors: The custom priors we want the model to take instead of the\n default ones. See our custom_priors documentation for details about the\n API and possible options.\n\n Returns:\n The values of the trend.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/15", "ground_truth": " default_priors = priors.get_default_priors()\n n_geos = core_utils.get_number_geos(data=data)\n # TODO(): Force all geos to have the same trend sign.\n with numpyro.plate(name=f\"{priors.COEF_TREND}_plate\", size=n_geos):\n coef_trend = numpyro.sample(\n name=priors.COEF_TREND,\n fn=custom_priors.get(priors.COEF_TREND,\n default_priors[priors.COEF_TREND]))\n\n expo_trend = numpyro.sample(\n name=priors.EXPO_TREND,\n fn=custom_priors.get(priors.EXPO_TREND,\n default_priors[priors.EXPO_TREND]))\n linear_trend = jnp.arange(data.shape[0])\n if n_geos > 1: # For geo model's case\n linear_trend = jnp.expand_dims(linear_trend, axis=-1)\n return _trend_with_exponent(\n coef_trend=coef_trend, trend=linear_trend, expo_trend=expo_trend)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "time", "trend.py"], "context_start_lineno": 0, "lineno": 59, "function_name": "trend_with_exponent"}, "groundtruth": " default_priors = priors.get_default_priors()\n n_geos = core_utils.get_number_geos(data=data)\n # TODO(): Force all geos to have the same trend sign.\n with numpyro.plate(name=f\"{priors.COEF_TREND}_plate\", size=n_geos):\n coef_trend = numpyro.sample(\n name=priors.COEF_TREND,\n fn=custom_priors.get(priors.COEF_TREND,\n default_priors[priors.COEF_TREND]))\n\n expo_trend = numpyro.sample(\n name=priors.EXPO_TREND,\n fn=custom_priors.get(priors.EXPO_TREND,\n default_priors[priors.EXPO_TREND]))\n linear_trend = jnp.arange(data.shape[0])\n if n_geos > 1: # For geo model's case\n linear_trend = jnp.expand_dims(linear_trend, axis=-1)\n return _trend_with_exponent(\n coef_trend=coef_trend, trend=linear_trend, expo_trend=expo_trend)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Core and modelling functions for trend.\"\"\"\n\nimport functools\nfrom typing import Mapping\n\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro 
import distributions as dist\n\nfrom lightweight_mmm.core import core_utils\nfrom lightweight_mmm.core import priors\n\n\n@jax.jit\ndef _trend_with_exponent(coef_trend: jnp.ndarray, trend: jnp.ndarray,\n expo_trend: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Applies the coefficient and exponent to the trend to obtain trend values.\n\n Args:\n coef_trend: Coefficient to be multiplied by the trend.\n trend: Initial trend values.\n expo_trend: Exponent to be applied to the trend.\n\n Returns:\n The trend values generated.\n \"\"\"\n return coef_trend * trend**expo_trend\n\n\ndef trend_with_exponent(\n data: jnp.ndarray,\n custom_priors: Mapping[str, dist.Distribution],\n) -> jnp.ndarray:\n \"\"\"Trend with exponent for curvature.\n\n Args:\n data: Data for which trend will be created.\n custom_priors: The custom priors we want the model to take instead of the\n default ones. See our custom_priors documentation for details about the\n API and possible options.\n\n Returns:\n The values of the trend.\n \"\"\"\n default_priors = priors.get_default_priors()\n n_geos = core_utils.get_number_geos(data=data)\n # TODO(): Force all geos to have the same trend sign.\n with numpyro.plate(name=f\"{priors.COEF_TREND}_plate\", size=n_geos):\n coef_trend = numpyro.sample(\n name=priors.COEF_TREND,\n fn=custom_priors.get(priors.COEF_TREND,\n default_priors[priors.COEF_TREND]))\n\n expo_trend = numpyro.sample(\n name=priors.EXPO_TREND,\n fn=custom_priors.get(priors.EXPO_TREND,\n default_priors[priors.EXPO_TREND]))\n linear_trend = jnp.arange(data.shape[0])\n if n_geos > 1: # For geo model's case\n linear_trend = jnp.expand_dims(linear_trend, axis=-1)\n return _trend_with_exponent(\n coef_trend=coef_trend, trend=linear_trend, expo_trend=expo_trend)\n\n\n@functools.partial(jax.jit, static_argnames=(\"number_periods\",))\ndef _dynamic_trend(\n number_periods: int,\n random_walk_level: jnp.ndarray,\n random_walk_slope: jnp.ndarray,\n initial_level: jnp.ndarray,\n initial_slope: jnp.ndarray,\n variance_level: jnp.ndarray,\n variance_slope: jnp.ndarray,\n) -> jnp.ndarray:\n \"\"\"Calculates dynamic trend using local linear trend method.\n\n More details about this function can be found in:\n https://storage.googleapis.com/pub-tools-public-publication-data/pdf/41854.pdf\n\n Args:\n number_periods: Number of time periods in the data.\n random_walk_level: Random walk of level from sample.\n random_walk_slope: Random walk of slope from sample.\n initial_level: The initial value for level in local linear trend model.\n initial_slope: The initial value for slope in local linear trend model.\n variance_level: The variance of the expected increase in level between time.\n variance_slope: The variance of the expected increase in slope between time.\n\n Returns:\n The dynamic trend values for the given data with the given parameters.\n \"\"\"\n # Simulate gaussian random walk of level with initial level.", "metadata": {"task_id": "google--lightweight_mmm/16", "ground_truth": " random_level = variance_level * random_walk_level\n random_level_with_initial_level = jnp.concatenate(\n [jnp.array([random_level[0] + initial_level]), random_level[1:]])\n level_trend_t = jnp.cumsum(random_level_with_initial_level, axis=0)\n # Simulate gaussian random walk of slope with initial slope.\n random_slope = variance_slope * random_walk_slope\n random_slope_with_initial_slope = jnp.concatenate(\n [jnp.array([random_slope[0] + initial_slope]), random_slope[1:]])\n slope_trend_t = jnp.cumsum(random_slope_with_initial_slope, axis=0)\n # Accumulate 
sum of slope series to address latent variable slope in function\n # level_t = level_t-1 + slope_t-1.\n initial_zero_shape = [(1, 0)] if slope_trend_t.ndim == 1 else [(1, 0), (0, 0)]\n slope_trend_cumsum = jnp.pad(\n jnp.cumsum(slope_trend_t, axis=0)[:number_periods - 1],\n initial_zero_shape, mode=\"constant\", constant_values=0)\n return level_trend_t + slope_trend_cumsum\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "time", "trend.py"], "context_start_lineno": 0, "lineno": 107, "function_name": "_dynamic_trend"}, "groundtruth": " random_level = variance_level * random_walk_level\n random_level_with_initial_level = jnp.concatenate(\n [jnp.array([random_level[0] + initial_level]), random_level[1:]])\n level_trend_t = jnp.cumsum(random_level_with_initial_level, axis=0)\n # Simulate gaussian random walk of slope with initial slope.\n random_slope = variance_slope * random_walk_slope\n random_slope_with_initial_slope = jnp.concatenate(\n [jnp.array([random_slope[0] + initial_slope]), random_slope[1:]])\n slope_trend_t = jnp.cumsum(random_slope_with_initial_slope, axis=0)\n # Accumulate sum of slope series to address latent variable slope in function\n # level_t = level_t-1 + slope_t-1.\n initial_zero_shape = [(1, 0)] if slope_trend_t.ndim == 1 else [(1, 0), (0, 0)]\n slope_trend_cumsum = jnp.pad(\n jnp.cumsum(slope_trend_t, axis=0)[:number_periods - 1],\n initial_zero_shape, mode=\"constant\", constant_values=0)\n return level_trend_t + slope_trend_cumsum\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module for modeling the intercept.\"\"\"\n\nfrom typing import Mapping\n\nimport immutabledict\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import distributions as dist\n\nfrom lightweight_mmm.core import core_utils\nfrom lightweight_mmm.core import priors\n\n\ndef simple_intercept(\n data: jnp.ndarray,\n custom_priors: Mapping[str,\n dist.Distribution] = immutabledict.immutabledict(),\n) -> jnp.ndarray:\n \"\"\"Calculates a national or geo incercept.\n Note that this intercept is constant over time.\n\n Args:\n data: Media input data. Media data must have either 2 dims for national\n model or 3 for geo models.\n custom_priors: The custom priors we want the model to take instead of the\n default ones. 
Refer to the full documentation on custom priors for\n details.\n\n Returns:\n The values of the intercept.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/17", "ground_truth": " default_priors = priors.get_default_priors()\n n_geos = core_utils.get_number_geos(data=data)\n\n with numpyro.plate(name=f\"{priors.INTERCEPT}_plate\", size=n_geos):\n intercept = numpyro.sample(\n name=priors.INTERCEPT,\n fn=custom_priors.get(priors.INTERCEPT,\n default_priors[priors.INTERCEPT]),\n )\n return intercept\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "baseline", "intercept.py"], "context_start_lineno": 0, "lineno": 45, "function_name": "simple_intercept"}, "groundtruth": " default_priors = priors.get_default_priors()\n n_geos = core_utils.get_number_geos(data=data)\n\n with numpyro.plate(name=f\"{priors.INTERCEPT}_plate\", size=n_geos):\n intercept = numpyro.sample(\n name=priors.INTERCEPT,\n fn=custom_priors.get(priors.INTERCEPT,\n default_priors[priors.INTERCEPT]),\n )\n return intercept\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for intercept.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import handlers\nimport numpyro.distributions as dist\n\nfrom lightweight_mmm.core import core_utils\nfrom lightweight_mmm.core import priors\nfrom lightweight_mmm.core.baseline import intercept\n\n\nclass InterceptTest(parameterized.TestCase):\n\n @parameterized.named_parameters(\n dict(\n testcase_name=\"national\",\n data_shape=(150, 3),\n ),\n dict(\n testcase_name=\"geo\",\n data_shape=(150, 3, 5),\n ),\n )\n def test_simple_intercept_produces_output_correct_shape(self, data_shape):\n\n def mock_model_function(data):", "metadata": {"task_id": "google--lightweight_mmm/18", "ground_truth": " numpyro.deterministic(\n \"intercept_values\",\n intercept.simple_intercept(data=data, custom_priors={}))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "baseline", "intercept_test.py"], "context_start_lineno": 0, "lineno": 44, "function_name": "mock_model_function"}, "groundtruth": " numpyro.deterministic(\n \"intercept_values\",\n intercept.simple_intercept(data=data, custom_priors={}))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Set of core and modelling saturation functions.\"\"\"\n\nfrom typing import Mapping\nimport 
jax\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import distributions as dist\n\nfrom lightweight_mmm.core import core_utils\nfrom lightweight_mmm.core import priors\n\n\n@jax.jit\ndef _hill(\n data: jnp.ndarray,\n half_max_effective_concentration: jnp.ndarray,\n slope: jnp.ndarray,\n) -> jnp.ndarray:\n \"\"\"Calculates the hill function for a given array of values.\n\n Refer to the following link for detailed information on this equation:\n https://en.wikipedia.org/wiki/Hill_equation_(biochemistry)\n\n Args:\n data: Input data.\n half_max_effective_concentration: ec50 value for the hill function.\n slope: Slope of the hill function.\n\n Returns:\n The hill values for the respective input data.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/19", "ground_truth": " save_transform = core_utils.apply_exponent_safe(\n data=data / half_max_effective_concentration, exponent=-slope)\n return jnp.where(save_transform == 0, x=0, y=1. / (1 + save_transform))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "transformations", "saturation.py"], "context_start_lineno": 0, "lineno": 45, "function_name": "_hill"}, "groundtruth": " save_transform = core_utils.apply_exponent_safe(\n data=data / half_max_effective_concentration, exponent=-slope)\n return jnp.where(save_transform == 0, x=0, y=1. / (1 + save_transform))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Set of core and modelling saturation functions.\"\"\"\n\nfrom typing import Mapping\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import distributions as dist\n\nfrom lightweight_mmm.core import core_utils\nfrom lightweight_mmm.core import priors\n\n\n@jax.jit\ndef _hill(\n data: jnp.ndarray,\n half_max_effective_concentration: jnp.ndarray,\n slope: jnp.ndarray,\n) -> jnp.ndarray:\n \"\"\"Calculates the hill function for a given array of values.\n\n Refer to the following link for detailed information on this equation:\n https://en.wikipedia.org/wiki/Hill_equation_(biochemistry)\n\n Args:\n data: Input data.\n half_max_effective_concentration: ec50 value for the hill function.\n slope: Slope of the hill function.\n\n Returns:\n The hill values for the respective input data.\n \"\"\"\n save_transform = core_utils.apply_exponent_safe(\n data=data / half_max_effective_concentration, exponent=-slope)\n return jnp.where(save_transform == 0, x=0, y=1. / (1 + save_transform))\n\n\ndef hill(\n data: jnp.ndarray,\n custom_priors: Mapping[str, dist.Distribution],\n *,\n prefix: str = \"\",\n) -> jnp.ndarray:\n \"\"\"Transforms the input data with the adstock and hill functions.\n\n Args:\n data: Media data to be transformed. It is expected to have 2 dims for\n national models and 3 for geo models.\n custom_priors: The custom priors we want the model to take instead of the\n default ones. 
The possible names of parameters for hill_adstock and\n exponent are \"lag_weight\", \"half_max_effective_concentration\" and \"slope\".\n prefix: Prefix to use in the variable name for Numpyro.\n\n Returns:\n The transformed media data.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/20", "ground_truth": " default_priors = priors.get_default_priors()\n\n with numpyro.plate(\n name=f\"{prefix}{priors.HALF_MAX_EFFECTIVE_CONCENTRATION}_plate\",\n size=data.shape[1]):\n half_max_effective_concentration = numpyro.sample(\n name=f\"{prefix}{priors.HALF_MAX_EFFECTIVE_CONCENTRATION}\",\n fn=custom_priors.get(\n priors.HALF_MAX_EFFECTIVE_CONCENTRATION,\n default_priors[priors.HALF_MAX_EFFECTIVE_CONCENTRATION]))\n\n with numpyro.plate(name=f\"{prefix}{priors.SLOPE}_plate\", size=data.shape[1]):\n slope = numpyro.sample(\n name=f\"{prefix}{priors.SLOPE}\",\n fn=custom_priors.get(priors.SLOPE, default_priors[priors.SLOPE]))\n\n if data.ndim == 3:\n half_max_effective_concentration = jnp.expand_dims(\n half_max_effective_concentration, axis=-1)\n slope = jnp.expand_dims(slope, axis=-1)\n\n return _hill(\n data=data,\n half_max_effective_concentration=half_max_effective_concentration,\n slope=slope)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "transformations", "saturation.py"], "context_start_lineno": 0, "lineno": 69, "function_name": "hill"}, "groundtruth": " default_priors = priors.get_default_priors()\n\n with numpyro.plate(\n name=f\"{prefix}{priors.HALF_MAX_EFFECTIVE_CONCENTRATION}_plate\",\n size=data.shape[1]):\n half_max_effective_concentration = numpyro.sample(\n name=f\"{prefix}{priors.HALF_MAX_EFFECTIVE_CONCENTRATION}\",\n fn=custom_priors.get(\n priors.HALF_MAX_EFFECTIVE_CONCENTRATION,\n default_priors[priors.HALF_MAX_EFFECTIVE_CONCENTRATION]))\n\n with numpyro.plate(name=f\"{prefix}{priors.SLOPE}_plate\", size=data.shape[1]):\n slope = numpyro.sample(\n name=f\"{prefix}{priors.SLOPE}\",\n fn=custom_priors.get(priors.SLOPE, default_priors[priors.SLOPE]))\n\n if data.ndim == 3:\n half_max_effective_concentration = jnp.expand_dims(\n half_max_effective_concentration, axis=-1)\n slope = jnp.expand_dims(slope, axis=-1)\n\n return _hill(\n data=data,\n half_max_effective_concentration=half_max_effective_concentration,\n slope=slope)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Set of core and modelling saturation functions.\"\"\"\n\nfrom typing import Mapping\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import distributions as dist\n\nfrom lightweight_mmm.core import core_utils\nfrom lightweight_mmm.core import priors\n\n\n@jax.jit\ndef _hill(\n data: jnp.ndarray,\n half_max_effective_concentration: jnp.ndarray,\n slope: jnp.ndarray,\n) -> jnp.ndarray:\n \"\"\"Calculates the hill function for a given array of values.\n\n Refer to the following link for detailed information on this equation:\n https://en.wikipedia.org/wiki/Hill_equation_(biochemistry)\n\n Args:\n data: Input 
data.\n half_max_effective_concentration: ec50 value for the hill function.\n slope: Slope of the hill function.\n\n Returns:\n The hill values for the respective input data.\n \"\"\"\n save_transform = core_utils.apply_exponent_safe(\n data=data / half_max_effective_concentration, exponent=-slope)\n return jnp.where(save_transform == 0, x=0, y=1. / (1 + save_transform))\n\n\ndef hill(\n data: jnp.ndarray,\n custom_priors: Mapping[str, dist.Distribution],\n *,\n prefix: str = \"\",\n) -> jnp.ndarray:\n \"\"\"Transforms the input data with the adstock and hill functions.\n\n Args:\n data: Media data to be transformed. It is expected to have 2 dims for\n national models and 3 for geo models.\n custom_priors: The custom priors we want the model to take instead of the\n default ones. The possible names of parameters for hill_adstock and\n exponent are \"lag_weight\", \"half_max_effective_concentration\" and \"slope\".\n prefix: Prefix to use in the variable name for Numpyro.\n\n Returns:\n The transformed media data.\n \"\"\"\n default_priors = priors.get_default_priors()\n\n with numpyro.plate(\n name=f\"{prefix}{priors.HALF_MAX_EFFECTIVE_CONCENTRATION}_plate\",\n size=data.shape[1]):\n half_max_effective_concentration = numpyro.sample(\n name=f\"{prefix}{priors.HALF_MAX_EFFECTIVE_CONCENTRATION}\",\n fn=custom_priors.get(\n priors.HALF_MAX_EFFECTIVE_CONCENTRATION,\n default_priors[priors.HALF_MAX_EFFECTIVE_CONCENTRATION]))\n\n with numpyro.plate(name=f\"{prefix}{priors.SLOPE}_plate\", size=data.shape[1]):\n slope = numpyro.sample(\n name=f\"{prefix}{priors.SLOPE}\",\n fn=custom_priors.get(priors.SLOPE, default_priors[priors.SLOPE]))\n\n if data.ndim == 3:\n half_max_effective_concentration = jnp.expand_dims(\n half_max_effective_concentration, axis=-1)\n slope = jnp.expand_dims(slope, axis=-1)\n\n return _hill(\n data=data,\n half_max_effective_concentration=half_max_effective_concentration,\n slope=slope)\n\n\ndef _exponent(data: jnp.ndarray, exponent_values: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Applies exponent to the given data.\"\"\"\n return core_utils.apply_exponent_safe(data=data, exponent=exponent_values)\n\n\ndef exponent(\n data: jnp.ndarray,\n custom_priors: Mapping[str, dist.Distribution],\n *,\n prefix: str = \"\",\n) -> jnp.ndarray:\n \"\"\"Transforms the input data with the carryover function and exponent.\n\n Args:\n data: Media data to be transformed. 
It is expected to have 2 dims for\n national models and 3 for geo models.\n custom_priors: The custom priors we want the model to take instead of the\n default ones.\n prefix: Prefix to use in the variable name for Numpyro.\n\n Returns:\n The transformed media data.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/21", "ground_truth": " default_priors = priors.get_default_priors()\n\n with numpyro.plate(\n name=f\"{prefix}{priors.EXPONENT}_plate\", size=data.shape[1]):\n exponent_values = numpyro.sample(\n name=f\"{prefix}{priors.EXPONENT}\",\n fn=custom_priors.get(priors.EXPONENT, default_priors[priors.EXPONENT]))\n\n if data.ndim == 3:\n exponent_values = jnp.expand_dims(exponent_values, axis=-1)\n return _exponent(data=data, exponent_values=exponent_values)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "transformations", "saturation.py"], "context_start_lineno": 0, "lineno": 119, "function_name": "exponent"}, "groundtruth": " default_priors = priors.get_default_priors()\n\n with numpyro.plate(\n name=f\"{prefix}{priors.EXPONENT}_plate\", size=data.shape[1]):\n exponent_values = numpyro.sample(\n name=f\"{prefix}{priors.EXPONENT}\",\n fn=custom_priors.get(priors.EXPONENT, default_priors[priors.EXPONENT]))\n\n if data.ndim == 3:\n exponent_values = jnp.expand_dims(exponent_values, axis=-1)\n return _exponent(data=data, exponent_values=exponent_values)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Set of core and modelling lagging functions.\"\"\"\n\nimport functools\nfrom typing import Mapping, Union\n\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nimport numpyro.distributions as dist\nfrom lightweight_mmm.core import priors\n\n\n@functools.partial(jax.vmap, in_axes=(1, 1, None), out_axes=1)\ndef _carryover_convolve(data: jnp.ndarray, weights: jnp.ndarray,\n number_lags: int) -> jnp.ndarray:\n \"\"\"Applies the convolution between the data and the weights for the carryover.\n\n Args:\n data: Input data.\n weights: Window weights for the carryover.\n number_lags: Number of lags the window has.\n\n Returns:\n The result values from convolving the data and the weights with padding.\n \"\"\"\n window = jnp.concatenate([jnp.zeros(number_lags - 1), weights])\n return jax.scipy.signal.convolve(data, window, mode=\"same\") / weights.sum()\n\n\n@functools.partial(jax.jit, static_argnames=(\"number_lags\",))\ndef _carryover(\n data: jnp.ndarray,\n ad_effect_retention_rate: jnp.ndarray,\n peak_effect_delay: jnp.ndarray,\n number_lags: int,\n) -> jnp.ndarray:\n \"\"\"Calculates media carryover.\n\n More details about this function can be found in:\n https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46001.pdf\n\n Args:\n data: Input data. 
It is expected that data has either 2 dimensions for\n national models and 3 for geo models.\n ad_effect_retention_rate: Retention rate of the advertisement effect.\n Default is 0.5.\n peak_effect_delay: Delay of the peak effect in the carryover function.\n Default is 1.\n number_lags: Number of lags to include in the carryover calculation. Default\n is 13.\n\n Returns:\n The carryover values for the given data with the given parameters.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/22", "ground_truth": " lags_arange = jnp.expand_dims(\n jnp.arange(number_lags, dtype=jnp.float32), axis=-1)\n convolve_func = _carryover_convolve\n if data.ndim == 3:\n # Since _carryover_convolve is already vmaped in the decorator we only need\n # to vmap it once here to handle the geo level data. We keep the windows bi\n # dimensional also for three dims data and vmap over only the extra data\n # dimension.\n convolve_func = jax.vmap(\n fun=_carryover_convolve, in_axes=(2, None, None), out_axes=2)\n weights = ad_effect_retention_rate**((lags_arange - peak_effect_delay)**2)\n return convolve_func(data, weights, number_lags)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "transformations", "lagging.py"], "context_start_lineno": 0, "lineno": 68, "function_name": "_carryover"}, "groundtruth": " lags_arange = jnp.expand_dims(\n jnp.arange(number_lags, dtype=jnp.float32), axis=-1)\n convolve_func = _carryover_convolve\n if data.ndim == 3:\n # Since _carryover_convolve is already vmaped in the decorator we only need\n # to vmap it once here to handle the geo level data. We keep the windows bi\n # dimensional also for three dims data and vmap over only the extra data\n # dimension.\n convolve_func = jax.vmap(\n fun=_carryover_convolve, in_axes=(2, None, None), out_axes=2)\n weights = ad_effect_retention_rate**((lags_arange - peak_effect_delay)**2)\n return convolve_func(data, weights, number_lags)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Set of core and modelling lagging functions.\"\"\"\n\nimport functools\nfrom typing import Mapping, Union\n\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nimport numpyro.distributions as dist\nfrom lightweight_mmm.core import priors\n\n\n@functools.partial(jax.vmap, in_axes=(1, 1, None), out_axes=1)\ndef _carryover_convolve(data: jnp.ndarray, weights: jnp.ndarray,\n number_lags: int) -> jnp.ndarray:\n \"\"\"Applies the convolution between the data and the weights for the carryover.\n\n Args:\n data: Input data.\n weights: Window weights for the carryover.\n number_lags: Number of lags the window has.\n\n Returns:\n The result values from convolving the data and the weights with padding.\n \"\"\"\n window = jnp.concatenate([jnp.zeros(number_lags - 1), weights])\n return jax.scipy.signal.convolve(data, window, mode=\"same\") / weights.sum()\n\n\n@functools.partial(jax.jit, static_argnames=(\"number_lags\",))\ndef _carryover(\n data: jnp.ndarray,\n ad_effect_retention_rate: 
jnp.ndarray,\n peak_effect_delay: jnp.ndarray,\n number_lags: int,\n) -> jnp.ndarray:\n \"\"\"Calculates media carryover.\n\n More details about this function can be found in:\n https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46001.pdf\n\n Args:\n data: Input data. It is expected that data has either 2 dimensions for\n national models and 3 for geo models.\n ad_effect_retention_rate: Retention rate of the advertisement effect.\n Default is 0.5.\n peak_effect_delay: Delay of the peak effect in the carryover function.\n Default is 1.\n number_lags: Number of lags to include in the carryover calculation. Default\n is 13.\n\n Returns:\n The carryover values for the given data with the given parameters.\n \"\"\"\n lags_arange = jnp.expand_dims(\n jnp.arange(number_lags, dtype=jnp.float32), axis=-1)\n convolve_func = _carryover_convolve\n if data.ndim == 3:\n # Since _carryover_convolve is already vmaped in the decorator we only need\n # to vmap it once here to handle the geo level data. We keep the windows bi\n # dimensional also for three dims data and vmap over only the extra data\n # dimension.\n convolve_func = jax.vmap(\n fun=_carryover_convolve, in_axes=(2, None, None), out_axes=2)\n weights = ad_effect_retention_rate**((lags_arange - peak_effect_delay)**2)\n return convolve_func(data, weights, number_lags)\n\n\ndef carryover(\n data: jnp.ndarray,\n custom_priors: Mapping[str, dist.Distribution],\n *,\n number_lags: int = 13,\n prefix: str = \"\",\n) -> jnp.ndarray:\n \"\"\"Transforms the input data with the carryover function.\n\n Args:\n data: Media data to be transformed. It is expected to have 2 dims for\n national models and 3 for geo models.\n custom_priors: The custom priors we want the model to take instead of the\n default ones.\n number_lags: Number of lags for the carryover function.\n prefix: Prefix to use in the variable name for Numpyro.\n\n Returns:\n The transformed media data.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/23", "ground_truth": " default_priors = priors.get_default_priors()\n with numpyro.plate(\n name=f\"{prefix}{priors.AD_EFFECT_RETENTION_RATE}_plate\",\n size=data.shape[1]):\n ad_effect_retention_rate = numpyro.sample(\n name=f\"{prefix}{priors.AD_EFFECT_RETENTION_RATE}\",\n fn=custom_priors.get(priors.AD_EFFECT_RETENTION_RATE,\n default_priors[priors.AD_EFFECT_RETENTION_RATE]))\n\n with numpyro.plate(\n name=f\"{prefix}{priors.PEAK_EFFECT_DELAY}_plate\", size=data.shape[1]):\n peak_effect_delay = numpyro.sample(\n name=f\"{prefix}{priors.PEAK_EFFECT_DELAY}\",\n fn=custom_priors.get(priors.PEAK_EFFECT_DELAY,\n default_priors[priors.PEAK_EFFECT_DELAY]))\n\n return _carryover(\n data=data,\n ad_effect_retention_rate=ad_effect_retention_rate,\n peak_effect_delay=peak_effect_delay,\n number_lags=number_lags)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "transformations", "lagging.py"], "context_start_lineno": 0, "lineno": 102, "function_name": "carryover"}, "groundtruth": " default_priors = priors.get_default_priors()\n with numpyro.plate(\n name=f\"{prefix}{priors.AD_EFFECT_RETENTION_RATE}_plate\",\n size=data.shape[1]):\n ad_effect_retention_rate = numpyro.sample(\n name=f\"{prefix}{priors.AD_EFFECT_RETENTION_RATE}\",\n fn=custom_priors.get(priors.AD_EFFECT_RETENTION_RATE,\n default_priors[priors.AD_EFFECT_RETENTION_RATE]))\n\n with numpyro.plate(\n name=f\"{prefix}{priors.PEAK_EFFECT_DELAY}_plate\", size=data.shape[1]):\n peak_effect_delay = numpyro.sample(\n 
name=f\"{prefix}{priors.PEAK_EFFECT_DELAY}\",\n fn=custom_priors.get(priors.PEAK_EFFECT_DELAY,\n default_priors[priors.PEAK_EFFECT_DELAY]))\n\n return _carryover(\n data=data,\n ad_effect_retention_rate=ad_effect_retention_rate,\n peak_effect_delay=peak_effect_delay,\n number_lags=number_lags)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Set of core and modelling lagging functions.\"\"\"\n\nimport functools\nfrom typing import Mapping, Union\n\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nimport numpyro.distributions as dist\nfrom lightweight_mmm.core import priors\n\n\n@functools.partial(jax.vmap, in_axes=(1, 1, None), out_axes=1)\ndef _carryover_convolve(data: jnp.ndarray, weights: jnp.ndarray,\n number_lags: int) -> jnp.ndarray:\n \"\"\"Applies the convolution between the data and the weights for the carryover.\n\n Args:\n data: Input data.\n weights: Window weights for the carryover.\n number_lags: Number of lags the window has.\n\n Returns:\n The result values from convolving the data and the weights with padding.\n \"\"\"\n window = jnp.concatenate([jnp.zeros(number_lags - 1), weights])\n return jax.scipy.signal.convolve(data, window, mode=\"same\") / weights.sum()\n\n\n@functools.partial(jax.jit, static_argnames=(\"number_lags\",))\ndef _carryover(\n data: jnp.ndarray,\n ad_effect_retention_rate: jnp.ndarray,\n peak_effect_delay: jnp.ndarray,\n number_lags: int,\n) -> jnp.ndarray:\n \"\"\"Calculates media carryover.\n\n More details about this function can be found in:\n https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46001.pdf\n\n Args:\n data: Input data. It is expected that data has either 2 dimensions for\n national models and 3 for geo models.\n ad_effect_retention_rate: Retention rate of the advertisement effect.\n Default is 0.5.\n peak_effect_delay: Delay of the peak effect in the carryover function.\n Default is 1.\n number_lags: Number of lags to include in the carryover calculation. Default\n is 13.\n\n Returns:\n The carryover values for the given data with the given parameters.\n \"\"\"\n lags_arange = jnp.expand_dims(\n jnp.arange(number_lags, dtype=jnp.float32), axis=-1)\n convolve_func = _carryover_convolve\n if data.ndim == 3:\n # Since _carryover_convolve is already vmaped in the decorator we only need\n # to vmap it once here to handle the geo level data. We keep the windows bi\n # dimensional also for three dims data and vmap over only the extra data\n # dimension.\n convolve_func = jax.vmap(\n fun=_carryover_convolve, in_axes=(2, None, None), out_axes=2)\n weights = ad_effect_retention_rate**((lags_arange - peak_effect_delay)**2)\n return convolve_func(data, weights, number_lags)\n\n\ndef carryover(\n data: jnp.ndarray,\n custom_priors: Mapping[str, dist.Distribution],\n *,\n number_lags: int = 13,\n prefix: str = \"\",\n) -> jnp.ndarray:\n \"\"\"Transforms the input data with the carryover function.\n\n Args:\n data: Media data to be transformed. 
It is expected to have 2 dims for\n national models and 3 for geo models.\n custom_priors: The custom priors we want the model to take instead of the\n default ones.\n number_lags: Number of lags for the carryover function.\n prefix: Prefix to use in the variable name for Numpyro.\n\n Returns:\n The transformed media data.\n \"\"\"\n default_priors = priors.get_default_priors()\n with numpyro.plate(\n name=f\"{prefix}{priors.AD_EFFECT_RETENTION_RATE}_plate\",\n size=data.shape[1]):\n ad_effect_retention_rate = numpyro.sample(\n name=f\"{prefix}{priors.AD_EFFECT_RETENTION_RATE}\",\n fn=custom_priors.get(priors.AD_EFFECT_RETENTION_RATE,\n default_priors[priors.AD_EFFECT_RETENTION_RATE]))\n\n with numpyro.plate(\n name=f\"{prefix}{priors.PEAK_EFFECT_DELAY}_plate\", size=data.shape[1]):\n peak_effect_delay = numpyro.sample(\n name=f\"{prefix}{priors.PEAK_EFFECT_DELAY}\",\n fn=custom_priors.get(priors.PEAK_EFFECT_DELAY,\n default_priors[priors.PEAK_EFFECT_DELAY]))\n\n return _carryover(\n data=data,\n ad_effect_retention_rate=ad_effect_retention_rate,\n peak_effect_delay=peak_effect_delay,\n number_lags=number_lags)\n\n\n@jax.jit\ndef _adstock(\n data: jnp.ndarray,\n lag_weight: Union[float, jnp.ndarray] = .9,\n normalise: bool = True,\n) -> jnp.ndarray:\n \"\"\"Calculates the adstock value of a given array.\n\n To learn more about advertising lag:\n https://en.wikipedia.org/wiki/Advertising_adstock\n\n Args:\n data: Input array.\n lag_weight: lag_weight effect of the adstock function. Default is 0.9.\n normalise: Whether to normalise the output value. This normalization will\n divide the output values by (1 / (1 - lag_weight)).\n\n Returns:\n The adstock output of the input array.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/24", "ground_truth": " def adstock_internal(\n prev_adstock: jnp.ndarray,\n data: jnp.ndarray,\n lag_weight: Union[float, jnp.ndarray] = lag_weight,\n ) -> jnp.ndarray:\n adstock_value = prev_adstock * lag_weight + data\n return adstock_value, adstock_value# jax-ndarray\n\n _, adstock_values = jax.lax.scan(\n f=adstock_internal, init=data[0, ...], xs=data[1:, ...])\n adstock_values = jnp.concatenate([jnp.array([data[0, ...]]), adstock_values])\n return jax.lax.cond(\n normalise,\n lambda adstock_values: adstock_values / (1. / (1 - lag_weight)),\n lambda adstock_values: adstock_values,\n operand=adstock_values)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "transformations", "lagging.py"], "context_start_lineno": 0, "lineno": 146, "function_name": "_adstock"}, "groundtruth": " def adstock_internal(\n prev_adstock: jnp.ndarray,\n data: jnp.ndarray,\n lag_weight: Union[float, jnp.ndarray] = lag_weight,\n ) -> jnp.ndarray:\n adstock_value = prev_adstock * lag_weight + data\n return adstock_value, adstock_value# jax-ndarray\n\n _, adstock_values = jax.lax.scan(\n f=adstock_internal, init=data[0, ...], xs=data[1:, ...])\n adstock_values = jnp.concatenate([jnp.array([data[0, ...]]), adstock_values])\n return jax.lax.cond(\n normalise,\n lambda adstock_values: adstock_values / (1. 
/ (1 - lag_weight)),\n lambda adstock_values: adstock_values,\n operand=adstock_values)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Set of core and modelling lagging functions.\"\"\"\n\nimport functools\nfrom typing import Mapping, Union\n\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nimport numpyro.distributions as dist\nfrom lightweight_mmm.core import priors\n\n\n@functools.partial(jax.vmap, in_axes=(1, 1, None), out_axes=1)\ndef _carryover_convolve(data: jnp.ndarray, weights: jnp.ndarray,\n number_lags: int) -> jnp.ndarray:\n \"\"\"Applies the convolution between the data and the weights for the carryover.\n\n Args:\n data: Input data.\n weights: Window weights for the carryover.\n number_lags: Number of lags the window has.\n\n Returns:\n The result values from convolving the data and the weights with padding.\n \"\"\"\n window = jnp.concatenate([jnp.zeros(number_lags - 1), weights])\n return jax.scipy.signal.convolve(data, window, mode=\"same\") / weights.sum()\n\n\n@functools.partial(jax.jit, static_argnames=(\"number_lags\",))\ndef _carryover(\n data: jnp.ndarray,\n ad_effect_retention_rate: jnp.ndarray,\n peak_effect_delay: jnp.ndarray,\n number_lags: int,\n) -> jnp.ndarray:\n \"\"\"Calculates media carryover.\n\n More details about this function can be found in:\n https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46001.pdf\n\n Args:\n data: Input data. It is expected that data has either 2 dimensions for\n national models and 3 for geo models.\n ad_effect_retention_rate: Retention rate of the advertisement effect.\n Default is 0.5.\n peak_effect_delay: Delay of the peak effect in the carryover function.\n Default is 1.\n number_lags: Number of lags to include in the carryover calculation. Default\n is 13.\n\n Returns:\n The carryover values for the given data with the given parameters.\n \"\"\"\n lags_arange = jnp.expand_dims(\n jnp.arange(number_lags, dtype=jnp.float32), axis=-1)\n convolve_func = _carryover_convolve\n if data.ndim == 3:\n # Since _carryover_convolve is already vmaped in the decorator we only need\n # to vmap it once here to handle the geo level data. We keep the windows bi\n # dimensional also for three dims data and vmap over only the extra data\n # dimension.\n convolve_func = jax.vmap(\n fun=_carryover_convolve, in_axes=(2, None, None), out_axes=2)\n weights = ad_effect_retention_rate**((lags_arange - peak_effect_delay)**2)\n return convolve_func(data, weights, number_lags)\n\n\ndef carryover(\n data: jnp.ndarray,\n custom_priors: Mapping[str, dist.Distribution],\n *,\n number_lags: int = 13,\n prefix: str = \"\",\n) -> jnp.ndarray:\n \"\"\"Transforms the input data with the carryover function.\n\n Args:\n data: Media data to be transformed. 
It is expected to have 2 dims for\n national models and 3 for geo models.\n custom_priors: The custom priors we want the model to take instead of the\n default ones.\n number_lags: Number of lags for the carryover function.\n prefix: Prefix to use in the variable name for Numpyro.\n\n Returns:\n The transformed media data.\n \"\"\"\n default_priors = priors.get_default_priors()\n with numpyro.plate(\n name=f\"{prefix}{priors.AD_EFFECT_RETENTION_RATE}_plate\",\n size=data.shape[1]):\n ad_effect_retention_rate = numpyro.sample(\n name=f\"{prefix}{priors.AD_EFFECT_RETENTION_RATE}\",\n fn=custom_priors.get(priors.AD_EFFECT_RETENTION_RATE,\n default_priors[priors.AD_EFFECT_RETENTION_RATE]))\n\n with numpyro.plate(\n name=f\"{prefix}{priors.PEAK_EFFECT_DELAY}_plate\", size=data.shape[1]):\n peak_effect_delay = numpyro.sample(\n name=f\"{prefix}{priors.PEAK_EFFECT_DELAY}\",\n fn=custom_priors.get(priors.PEAK_EFFECT_DELAY,\n default_priors[priors.PEAK_EFFECT_DELAY]))\n\n return _carryover(\n data=data,\n ad_effect_retention_rate=ad_effect_retention_rate,\n peak_effect_delay=peak_effect_delay,\n number_lags=number_lags)\n\n\n@jax.jit\ndef _adstock(\n data: jnp.ndarray,\n lag_weight: Union[float, jnp.ndarray] = .9,\n normalise: bool = True,\n) -> jnp.ndarray:\n \"\"\"Calculates the adstock value of a given array.\n\n To learn more about advertising lag:\n https://en.wikipedia.org/wiki/Advertising_adstock\n\n Args:\n data: Input array.\n lag_weight: lag_weight effect of the adstock function. Default is 0.9.\n normalise: Whether to normalise the output value. This normalization will\n divide the output values by (1 / (1 - lag_weight)).\n\n Returns:\n The adstock output of the input array.\n \"\"\"\n\n def adstock_internal(\n prev_adstock: jnp.ndarray,\n data: jnp.ndarray,\n lag_weight: Union[float, jnp.ndarray] = lag_weight,\n ) -> jnp.ndarray:\n adstock_value = prev_adstock * lag_weight + data\n return adstock_value, adstock_value# jax-ndarray\n\n _, adstock_values = jax.lax.scan(\n f=adstock_internal, init=data[0, ...], xs=data[1:, ...])\n adstock_values = jnp.concatenate([jnp.array([data[0, ...]]), adstock_values])\n return jax.lax.cond(\n normalise,\n lambda adstock_values: adstock_values / (1. / (1 - lag_weight)),\n lambda adstock_values: adstock_values,\n operand=adstock_values)\n\n\ndef adstock(\n data: jnp.ndarray,\n custom_priors: Mapping[str, dist.Distribution],\n *,\n normalise: bool = True,\n prefix: str = \"\",\n) -> jnp.ndarray:\n \"\"\"Transforms the input data with the adstock function and exponent.\n\n Args:\n data: Media data to be transformed. It is expected to have 2 dims for\n national models and 3 for geo models.\n custom_priors: The custom priors we want the model to take instead of the\n default ones. 
The possible names of parameters for adstock and exponent\n are \"lag_weight\" and \"exponent\".\n normalise: Whether to normalise the output values.\n prefix: Prefix to use in the variable name for Numpyro.\n\n Returns:\n The transformed media data.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/25", "ground_truth": " default_priors = priors.get_default_priors()\n with numpyro.plate(\n name=f\"{prefix}{priors.LAG_WEIGHT}_plate\", size=data.shape[1]):\n lag_weight = numpyro.sample(\n name=f\"{prefix}{priors.LAG_WEIGHT}\",\n fn=custom_priors.get(priors.LAG_WEIGHT,\n default_priors[priors.LAG_WEIGHT]))\n\n if data.ndim == 3:\n lag_weight = jnp.expand_dims(lag_weight, axis=-1)\n\n return _adstock(data=data, lag_weight=lag_weight, normalise=normalise)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "transformations", "lagging.py"], "context_start_lineno": 0, "lineno": 185, "function_name": "adstock"}, "groundtruth": " default_priors = priors.get_default_priors()\n with numpyro.plate(\n name=f\"{prefix}{priors.LAG_WEIGHT}_plate\", size=data.shape[1]):\n lag_weight = numpyro.sample(\n name=f\"{prefix}{priors.LAG_WEIGHT}\",\n fn=custom_priors.get(priors.LAG_WEIGHT,\n default_priors[priors.LAG_WEIGHT]))\n\n if data.ndim == 3:\n lag_weight = jnp.expand_dims(lag_weight, axis=-1)\n\n return _adstock(data=data, lag_weight=lag_weight, normalise=normalise)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for lagging.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport numpyro\nfrom numpyro import handlers\nimport numpyro.distributions as dist\n\nfrom lightweight_mmm.core import priors\nfrom lightweight_mmm.core.transformations import lagging\n\n\nclass LaggingTest(parameterized.TestCase):\n\n @parameterized.named_parameters(\n dict(\n testcase_name=\"national\",\n data_shape=(150, 3),\n ad_effect_retention_rate_shape=(3,),\n peak_effect_delay_shape=(3,),\n number_lags=13,\n ),\n dict(\n testcase_name=\"geo\",\n data_shape=(150, 3, 5),\n ad_effect_retention_rate_shape=(3,),\n peak_effect_delay_shape=(3,),\n number_lags=13,\n ),\n )\n def test_core_carryover_produces_correct_shape(\n self,\n data_shape,\n ad_effect_retention_rate_shape,\n peak_effect_delay_shape,\n number_lags,\n ):\n data = jnp.ones(data_shape)\n ad_effect_retention_rate = jnp.ones(ad_effect_retention_rate_shape)\n peak_effect_delay = jnp.ones(peak_effect_delay_shape)\n\n output = lagging._carryover(\n data=data,\n ad_effect_retention_rate=ad_effect_retention_rate,\n peak_effect_delay=peak_effect_delay,\n number_lags=number_lags,\n )\n\n self.assertEqual(output.shape, data_shape)\n\n @parameterized.named_parameters(\n dict(\n testcase_name=\"national\",\n data_shape=(150, 3),\n ),\n dict(\n testcase_name=\"geo\",\n data_shape=(150, 3, 5),\n ),\n )\n def test_carryover_produces_correct_shape(self, data_shape):\n\n def mock_model_function(data, 
number_lags):", "metadata": {"task_id": "google--lightweight_mmm/26", "ground_truth": " numpyro.deterministic(\n \"carryover\",\n lagging.carryover(\n data=data, custom_priors={}, number_lags=number_lags))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "transformations", "lagging_test.py"], "context_start_lineno": 0, "lineno": 80, "function_name": "mock_model_function"}, "groundtruth": " numpyro.deterministic(\n \"carryover\",\n lagging.carryover(\n data=data, custom_priors={}, number_lags=number_lags))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for lagging.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport numpyro\nfrom numpyro import handlers\nimport numpyro.distributions as dist\n\nfrom lightweight_mmm.core import priors\nfrom lightweight_mmm.core.transformations import lagging\n\n\nclass LaggingTest(parameterized.TestCase):\n\n @parameterized.named_parameters(\n dict(\n testcase_name=\"national\",\n data_shape=(150, 3),\n ad_effect_retention_rate_shape=(3,),\n peak_effect_delay_shape=(3,),\n number_lags=13,\n ),\n dict(\n testcase_name=\"geo\",\n data_shape=(150, 3, 5),\n ad_effect_retention_rate_shape=(3,),\n peak_effect_delay_shape=(3,),\n number_lags=13,\n ),\n )\n def test_core_carryover_produces_correct_shape(\n self,\n data_shape,\n ad_effect_retention_rate_shape,\n peak_effect_delay_shape,\n number_lags,\n ):\n data = jnp.ones(data_shape)\n ad_effect_retention_rate = jnp.ones(ad_effect_retention_rate_shape)\n peak_effect_delay = jnp.ones(peak_effect_delay_shape)\n\n output = lagging._carryover(\n data=data,\n ad_effect_retention_rate=ad_effect_retention_rate,\n peak_effect_delay=peak_effect_delay,\n number_lags=number_lags,\n )\n\n self.assertEqual(output.shape, data_shape)\n\n @parameterized.named_parameters(\n dict(\n testcase_name=\"national\",\n data_shape=(150, 3),\n ),\n dict(\n testcase_name=\"geo\",\n data_shape=(150, 3, 5),\n ),\n )\n def test_carryover_produces_correct_shape(self, data_shape):\n\n def mock_model_function(data, number_lags):\n numpyro.deterministic(\n \"carryover\",\n lagging.carryover(\n data=data, custom_priors={}, number_lags=number_lags))\n\n num_samples = 10\n data = jnp.ones(data_shape)\n number_lags = 15\n kernel = numpyro.infer.NUTS(model=mock_model_function)\n mcmc = numpyro.infer.MCMC(\n sampler=kernel, num_warmup=10, num_samples=num_samples, num_chains=1)\n rng_key = jax.random.PRNGKey(0)\n\n mcmc.run(rng_key, data=data, number_lags=number_lags)\n carryover_values = mcmc.get_samples()[\"carryover\"]\n\n self.assertEqual(carryover_values.shape, (num_samples, *data.shape))\n\n @parameterized.named_parameters(\n dict(\n testcase_name=\"ad_effect_retention_rate\",\n prior_name=priors.AD_EFFECT_RETENTION_RATE,\n ),\n dict(\n testcase_name=\"peak_effect_delay\",\n prior_name=priors.PEAK_EFFECT_DELAY,\n ),\n )\n def 
test_carryover_custom_priors_are_taken_correctly(self, prior_name):\n expected_value1, expected_value2 = 5.2, 7.56\n custom_priors = {\n prior_name:\n dist.Kumaraswamy(\n concentration1=expected_value1, concentration0=expected_value2)\n }\n media = jnp.ones((10, 5, 5))\n number_lags = 13\n\n trace_handler = handlers.trace(handlers.seed(lagging.carryover, rng_seed=0))\n trace = trace_handler.get_trace(\n data=media,\n custom_priors=custom_priors,\n number_lags=number_lags,\n )\n values_and_dists = {\n name: site[\"fn\"] for name, site in trace.items() if \"fn\" in site\n }\n\n used_distribution = values_and_dists[prior_name]\n used_distribution = used_distribution.base_dist\n self.assertIsInstance(used_distribution, dist.Kumaraswamy)\n self.assertEqual(used_distribution.concentration0, expected_value2)\n self.assertEqual(used_distribution.concentration1, expected_value1)\n\n @parameterized.named_parameters(\n dict(\n testcase_name=\"national\",\n data_shape=(150, 3),\n lag_weight_shape=(3,),\n ),\n dict(\n testcase_name=\"geo\",\n data_shape=(150, 3, 5),\n lag_weight_shape=(3, 1),\n ),\n )\n def test_core_adstock_produces_correct_shape(self, data_shape,\n lag_weight_shape):\n data = jnp.ones(data_shape)\n lag_weight = jnp.ones(lag_weight_shape)\n\n output = lagging._adstock(data=data, lag_weight=lag_weight)\n\n self.assertEqual(output.shape, data_shape)\n\n @parameterized.named_parameters(\n dict(\n testcase_name=\"national\",\n data_shape=(150, 3),\n ),\n dict(\n testcase_name=\"geo\",\n data_shape=(150, 3, 5),\n ),\n )\n def test_adstock_produces_correct_shape(self, data_shape):\n\n def mock_model_function(data, normalise):", "metadata": {"task_id": "google--lightweight_mmm/27", "ground_truth": " numpyro.deterministic(\n \"adstock\",\n lagging.adstock(data=data, custom_priors={}, normalise=normalise))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "core", "transformations", "lagging_test.py"], "context_start_lineno": 0, "lineno": 168, "function_name": "mock_model_function"}, "groundtruth": " numpyro.deterministic(\n \"adstock\",\n lagging.adstock(data=data, custom_priors={}, normalise=normalise))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module containing the different models available in the lightweightMMM lib.\n\nCurrently this file contains a main model with three possible options for\nprocessing the media data. 
Which essentially grants the possibility of building\nthree different models.\n - Adstock\n - Hill-Adstock\n - Carryover\n\"\"\"\nimport sys\n# pylint: disable=g-import-not-at-top\nif sys.version_info >= (3, 8):\n from typing import Protocol\nelse:\n from typing_extensions import Protocol\n\nfrom typing import Any, Dict, Mapping, MutableMapping, Optional, Sequence, Union\n\nimport immutabledict\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import distributions as dist\n\nfrom lightweight_mmm import media_transforms\n\nPrior = Union[\n dist.Distribution,\n Dict[str, float],\n Sequence[float],\n float\n]\n\n\nclass TransformFunction(Protocol):\n\n def __call__(\n self,\n media_data: jnp.ndarray,\n custom_priors: MutableMapping[str, Prior],\n **kwargs: Any) -> jnp.ndarray:\n ...\n\n\n_INTERCEPT = \"intercept\"\n_COEF_TREND = \"coef_trend\"\n_EXPO_TREND = \"expo_trend\"\n_SIGMA = \"sigma\"\n_GAMMA_SEASONALITY = \"gamma_seasonality\"\n_WEEKDAY = \"weekday\"\n_COEF_EXTRA_FEATURES = \"coef_extra_features\"\n_COEF_SEASONALITY = \"coef_seasonality\"\n\nMODEL_PRIORS_NAMES = frozenset((\n _INTERCEPT,\n _COEF_TREND,\n _EXPO_TREND,\n _SIGMA,\n _GAMMA_SEASONALITY,\n _WEEKDAY,\n _COEF_EXTRA_FEATURES,\n _COEF_SEASONALITY))\n\n_EXPONENT = \"exponent\"\n_LAG_WEIGHT = \"lag_weight\"\n_HALF_MAX_EFFECTIVE_CONCENTRATION = \"half_max_effective_concentration\"\n_SLOPE = \"slope\"\n_AD_EFFECT_RETENTION_RATE = \"ad_effect_retention_rate\"\n_PEAK_EFFECT_DELAY = \"peak_effect_delay\"\n\nTRANSFORM_PRIORS_NAMES = immutabledict.immutabledict({\n \"carryover\":\n frozenset((_AD_EFFECT_RETENTION_RATE, _PEAK_EFFECT_DELAY, _EXPONENT)),\n \"adstock\":\n frozenset((_EXPONENT, _LAG_WEIGHT)),\n \"hill_adstock\":\n frozenset((_LAG_WEIGHT, _HALF_MAX_EFFECTIVE_CONCENTRATION, _SLOPE))\n})\n\nGEO_ONLY_PRIORS = frozenset((_COEF_SEASONALITY,))\n\n\ndef _get_default_priors() -> Mapping[str, Prior]:\n # Since JAX cannot be called before absl.app.run in tests we get default\n # priors from a function.", "metadata": {"task_id": "google--lightweight_mmm/28", "ground_truth": " return immutabledict.immutabledict({\n _INTERCEPT: dist.HalfNormal(scale=2.),\n _COEF_TREND: dist.Normal(loc=0., scale=1.),\n _EXPO_TREND: dist.Uniform(low=0.5, high=1.5),\n _SIGMA: dist.Gamma(concentration=1., rate=1.),\n _GAMMA_SEASONALITY: dist.Normal(loc=0., scale=1.),\n _WEEKDAY: dist.Normal(loc=0., scale=.5),\n _COEF_EXTRA_FEATURES: dist.Normal(loc=0., scale=1.),\n _COEF_SEASONALITY: dist.HalfNormal(scale=.5)\n })\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "models.py"], "context_start_lineno": 0, "lineno": 98, "function_name": "_get_default_priors"}, "groundtruth": " return immutabledict.immutabledict({\n _INTERCEPT: dist.HalfNormal(scale=2.),\n _COEF_TREND: dist.Normal(loc=0., scale=1.),\n _EXPO_TREND: dist.Uniform(low=0.5, high=1.5),\n _SIGMA: dist.Gamma(concentration=1., rate=1.),\n _GAMMA_SEASONALITY: dist.Normal(loc=0., scale=1.),\n _WEEKDAY: dist.Normal(loc=0., scale=.5),\n _COEF_EXTRA_FEATURES: dist.Normal(loc=0., scale=1.),\n _COEF_SEASONALITY: dist.HalfNormal(scale=.5)\n })\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module containing the different models available in the lightweightMMM lib.\n\nCurrently this file contains a main model with three possible options for\nprocessing the media data. Which essentially grants the possibility of building\nthree different models.\n - Adstock\n - Hill-Adstock\n - Carryover\n\"\"\"\nimport sys\n# pylint: disable=g-import-not-at-top\nif sys.version_info >= (3, 8):\n from typing import Protocol\nelse:\n from typing_extensions import Protocol\n\nfrom typing import Any, Dict, Mapping, MutableMapping, Optional, Sequence, Union\n\nimport immutabledict\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import distributions as dist\n\nfrom lightweight_mmm import media_transforms\n\nPrior = Union[\n dist.Distribution,\n Dict[str, float],\n Sequence[float],\n float\n]\n\n\nclass TransformFunction(Protocol):\n\n def __call__(\n self,\n media_data: jnp.ndarray,\n custom_priors: MutableMapping[str, Prior],\n **kwargs: Any) -> jnp.ndarray:\n ...\n\n\n_INTERCEPT = \"intercept\"\n_COEF_TREND = \"coef_trend\"\n_EXPO_TREND = \"expo_trend\"\n_SIGMA = \"sigma\"\n_GAMMA_SEASONALITY = \"gamma_seasonality\"\n_WEEKDAY = \"weekday\"\n_COEF_EXTRA_FEATURES = \"coef_extra_features\"\n_COEF_SEASONALITY = \"coef_seasonality\"\n\nMODEL_PRIORS_NAMES = frozenset((\n _INTERCEPT,\n _COEF_TREND,\n _EXPO_TREND,\n _SIGMA,\n _GAMMA_SEASONALITY,\n _WEEKDAY,\n _COEF_EXTRA_FEATURES,\n _COEF_SEASONALITY))\n\n_EXPONENT = \"exponent\"\n_LAG_WEIGHT = \"lag_weight\"\n_HALF_MAX_EFFECTIVE_CONCENTRATION = \"half_max_effective_concentration\"\n_SLOPE = \"slope\"\n_AD_EFFECT_RETENTION_RATE = \"ad_effect_retention_rate\"\n_PEAK_EFFECT_DELAY = \"peak_effect_delay\"\n\nTRANSFORM_PRIORS_NAMES = immutabledict.immutabledict({\n \"carryover\":\n frozenset((_AD_EFFECT_RETENTION_RATE, _PEAK_EFFECT_DELAY, _EXPONENT)),\n \"adstock\":\n frozenset((_EXPONENT, _LAG_WEIGHT)),\n \"hill_adstock\":\n frozenset((_LAG_WEIGHT, _HALF_MAX_EFFECTIVE_CONCENTRATION, _SLOPE))\n})\n\nGEO_ONLY_PRIORS = frozenset((_COEF_SEASONALITY,))\n\n\ndef _get_default_priors() -> Mapping[str, Prior]:\n # Since JAX cannot be called before absl.app.run in tests we get default\n # priors from a function.\n return immutabledict.immutabledict({\n _INTERCEPT: dist.HalfNormal(scale=2.),\n _COEF_TREND: dist.Normal(loc=0., scale=1.),\n _EXPO_TREND: dist.Uniform(low=0.5, high=1.5),\n _SIGMA: dist.Gamma(concentration=1., rate=1.),\n _GAMMA_SEASONALITY: dist.Normal(loc=0., scale=1.),\n _WEEKDAY: dist.Normal(loc=0., scale=.5),\n _COEF_EXTRA_FEATURES: dist.Normal(loc=0., scale=1.),\n _COEF_SEASONALITY: dist.HalfNormal(scale=.5)\n })\n\n\ndef _get_transform_default_priors() -> Mapping[str, Prior]:\n # Since JAX cannot be called before absl.app.run in tests we get default\n # priors from a function.", "metadata": {"task_id": "google--lightweight_mmm/29", "ground_truth": " return immutabledict.immutabledict({\n \"carryover\":\n immutabledict.immutabledict({\n _AD_EFFECT_RETENTION_RATE:\n dist.Beta(concentration1=1., concentration0=1.),\n _PEAK_EFFECT_DELAY:\n dist.HalfNormal(scale=2.),\n _EXPONENT:\n dist.Beta(concentration1=9., concentration0=1.)\n }),\n \"adstock\":\n immutabledict.immutabledict({\n _EXPONENT: dist.Beta(concentration1=9., concentration0=1.),\n _LAG_WEIGHT: dist.Beta(concentration1=2., concentration0=1.)\n }),\n \"hill_adstock\":\n immutabledict.immutabledict({\n _LAG_WEIGHT:\n 
dist.Beta(concentration1=2., concentration0=1.),\n _HALF_MAX_EFFECTIVE_CONCENTRATION:\n dist.Gamma(concentration=1., rate=1.),\n _SLOPE:\n dist.Gamma(concentration=1., rate=1.)\n })\n })\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "models.py"], "context_start_lineno": 0, "lineno": 113, "function_name": "_get_transform_default_priors"}, "groundtruth": " return immutabledict.immutabledict({\n \"carryover\":\n immutabledict.immutabledict({\n _AD_EFFECT_RETENTION_RATE:\n dist.Beta(concentration1=1., concentration0=1.),\n _PEAK_EFFECT_DELAY:\n dist.HalfNormal(scale=2.),\n _EXPONENT:\n dist.Beta(concentration1=9., concentration0=1.)\n }),\n \"adstock\":\n immutabledict.immutabledict({\n _EXPONENT: dist.Beta(concentration1=9., concentration0=1.),\n _LAG_WEIGHT: dist.Beta(concentration1=2., concentration0=1.)\n }),\n \"hill_adstock\":\n immutabledict.immutabledict({\n _LAG_WEIGHT:\n dist.Beta(concentration1=2., concentration0=1.),\n _HALF_MAX_EFFECTIVE_CONCENTRATION:\n dist.Gamma(concentration=1., rate=1.),\n _SLOPE:\n dist.Gamma(concentration=1., rate=1.)\n })\n })\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module containing the different models available in the lightweightMMM lib.\n\nCurrently this file contains a main model with three possible options for\nprocessing the media data. 
Which essentially grants the possibility of building\nthree different models.\n - Adstock\n - Hill-Adstock\n - Carryover\n\"\"\"\nimport sys\n# pylint: disable=g-import-not-at-top\nif sys.version_info >= (3, 8):\n from typing import Protocol\nelse:\n from typing_extensions import Protocol\n\nfrom typing import Any, Dict, Mapping, MutableMapping, Optional, Sequence, Union\n\nimport immutabledict\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import distributions as dist\n\nfrom lightweight_mmm import media_transforms\n\nPrior = Union[\n dist.Distribution,\n Dict[str, float],\n Sequence[float],\n float\n]\n\n\nclass TransformFunction(Protocol):\n\n def __call__(\n self,\n media_data: jnp.ndarray,\n custom_priors: MutableMapping[str, Prior],\n **kwargs: Any) -> jnp.ndarray:\n ...\n\n\n_INTERCEPT = \"intercept\"\n_COEF_TREND = \"coef_trend\"\n_EXPO_TREND = \"expo_trend\"\n_SIGMA = \"sigma\"\n_GAMMA_SEASONALITY = \"gamma_seasonality\"\n_WEEKDAY = \"weekday\"\n_COEF_EXTRA_FEATURES = \"coef_extra_features\"\n_COEF_SEASONALITY = \"coef_seasonality\"\n\nMODEL_PRIORS_NAMES = frozenset((\n _INTERCEPT,\n _COEF_TREND,\n _EXPO_TREND,\n _SIGMA,\n _GAMMA_SEASONALITY,\n _WEEKDAY,\n _COEF_EXTRA_FEATURES,\n _COEF_SEASONALITY))\n\n_EXPONENT = \"exponent\"\n_LAG_WEIGHT = \"lag_weight\"\n_HALF_MAX_EFFECTIVE_CONCENTRATION = \"half_max_effective_concentration\"\n_SLOPE = \"slope\"\n_AD_EFFECT_RETENTION_RATE = \"ad_effect_retention_rate\"\n_PEAK_EFFECT_DELAY = \"peak_effect_delay\"\n\nTRANSFORM_PRIORS_NAMES = immutabledict.immutabledict({\n \"carryover\":\n frozenset((_AD_EFFECT_RETENTION_RATE, _PEAK_EFFECT_DELAY, _EXPONENT)),\n \"adstock\":\n frozenset((_EXPONENT, _LAG_WEIGHT)),\n \"hill_adstock\":\n frozenset((_LAG_WEIGHT, _HALF_MAX_EFFECTIVE_CONCENTRATION, _SLOPE))\n})\n\nGEO_ONLY_PRIORS = frozenset((_COEF_SEASONALITY,))\n\n\ndef _get_default_priors() -> Mapping[str, Prior]:\n # Since JAX cannot be called before absl.app.run in tests we get default\n # priors from a function.\n return immutabledict.immutabledict({\n _INTERCEPT: dist.HalfNormal(scale=2.),\n _COEF_TREND: dist.Normal(loc=0., scale=1.),\n _EXPO_TREND: dist.Uniform(low=0.5, high=1.5),\n _SIGMA: dist.Gamma(concentration=1., rate=1.),\n _GAMMA_SEASONALITY: dist.Normal(loc=0., scale=1.),\n _WEEKDAY: dist.Normal(loc=0., scale=.5),\n _COEF_EXTRA_FEATURES: dist.Normal(loc=0., scale=1.),\n _COEF_SEASONALITY: dist.HalfNormal(scale=.5)\n })\n\n\ndef _get_transform_default_priors() -> Mapping[str, Prior]:\n # Since JAX cannot be called before absl.app.run in tests we get default\n # priors from a function.\n return immutabledict.immutabledict({\n \"carryover\":\n immutabledict.immutabledict({\n _AD_EFFECT_RETENTION_RATE:\n dist.Beta(concentration1=1., concentration0=1.),\n _PEAK_EFFECT_DELAY:\n dist.HalfNormal(scale=2.),\n _EXPONENT:\n dist.Beta(concentration1=9., concentration0=1.)\n }),\n \"adstock\":\n immutabledict.immutabledict({\n _EXPONENT: dist.Beta(concentration1=9., concentration0=1.),\n _LAG_WEIGHT: dist.Beta(concentration1=2., concentration0=1.)\n }),\n \"hill_adstock\":\n immutabledict.immutabledict({\n _LAG_WEIGHT:\n dist.Beta(concentration1=2., concentration0=1.),\n _HALF_MAX_EFFECTIVE_CONCENTRATION:\n dist.Gamma(concentration=1., rate=1.),\n _SLOPE:\n dist.Gamma(concentration=1., rate=1.)\n })\n })\n\n\ndef transform_adstock(media_data: jnp.ndarray,\n custom_priors: MutableMapping[str, Prior],\n normalise: bool = True) -> jnp.ndarray:\n \"\"\"Transforms the input data with the adstock function and exponent.\n\n Args:\n media_data: 
Media data to be transformed. It is expected to have 2 dims for\n national models and 3 for geo models.\n custom_priors: The custom priors we want the model to take instead of the\n default ones. The possible names of parameters for adstock and exponent\n are \"lag_weight\" and \"exponent\".\n normalise: Whether to normalise the output values.\n\n Returns:\n The transformed media data.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/30", "ground_truth": " transform_default_priors = _get_transform_default_priors()[\"adstock\"]\n with numpyro.plate(name=f\"{_LAG_WEIGHT}_plate\",\n size=media_data.shape[1]):\n lag_weight = numpyro.sample(\n name=_LAG_WEIGHT,\n fn=custom_priors.get(_LAG_WEIGHT,\n transform_default_priors[_LAG_WEIGHT]))\n\n with numpyro.plate(name=f\"{_EXPONENT}_plate\",\n size=media_data.shape[1]):\n exponent = numpyro.sample(\n name=_EXPONENT,\n fn=custom_priors.get(_EXPONENT,\n transform_default_priors[_EXPONENT]))\n\n if media_data.ndim == 3:\n lag_weight = jnp.expand_dims(lag_weight, axis=-1)\n exponent = jnp.expand_dims(exponent, axis=-1)\n\n adstock = media_transforms.adstock(\n data=media_data, lag_weight=lag_weight, normalise=normalise)\n\n return media_transforms.apply_exponent_safe(data=adstock, exponent=exponent)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "models.py"], "context_start_lineno": 0, "lineno": 156, "function_name": "transform_adstock"}, "groundtruth": " transform_default_priors = _get_transform_default_priors()[\"adstock\"]\n with numpyro.plate(name=f\"{_LAG_WEIGHT}_plate\",\n size=media_data.shape[1]):\n lag_weight = numpyro.sample(\n name=_LAG_WEIGHT,\n fn=custom_priors.get(_LAG_WEIGHT,\n transform_default_priors[_LAG_WEIGHT]))\n\n with numpyro.plate(name=f\"{_EXPONENT}_plate\",\n size=media_data.shape[1]):\n exponent = numpyro.sample(\n name=_EXPONENT,\n fn=custom_priors.get(_EXPONENT,\n transform_default_priors[_EXPONENT]))\n\n if media_data.ndim == 3:\n lag_weight = jnp.expand_dims(lag_weight, axis=-1)\n exponent = jnp.expand_dims(exponent, axis=-1)\n\n adstock = media_transforms.adstock(\n data=media_data, lag_weight=lag_weight, normalise=normalise)\n\n return media_transforms.apply_exponent_safe(data=adstock, exponent=exponent)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for plot.\"\"\"\n\nfrom unittest import mock\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax.numpy as jnp\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport numpyro.distributions as dist\nimport pandas as pd\n\nfrom lightweight_mmm import lightweight_mmm\nfrom lightweight_mmm import models\nfrom lightweight_mmm import plot\nfrom lightweight_mmm import preprocessing\n\nMOCK_NATIONAL_TRACE = {\n \"coef_extra_features\": np.ones([10, 2]),\n \"coef_media\": np.ones([10, 5]),\n \"coef_trend\": np.ones([10, 1]),\n \"expo_trend\": np.ones([10, 1]),\n \"gamma_seasonality\": np.ones([10, 3, 2]),\n \"intercept\": 
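At its core, the adstock step in the record above is a geometric carry-over, y[t] = x[t] + lag_weight * y[t-1], followed by a safe exponent. A NumPy-only sketch of that recursion (the library's own `media_transforms.adstock` additionally handles normalisation and batched JAX arrays):

import numpy as np

def geometric_adstock(spend: np.ndarray, lag_weight: float) -> np.ndarray:
    """Each period keeps a decayed memory of all past spend."""
    out = np.zeros_like(spend, dtype=float)
    carry = 0.0
    for t, x in enumerate(spend):
        carry = x + lag_weight * carry
        out[t] = carry
    return out

print(geometric_adstock(np.array([100., 0., 0., 0.]), lag_weight=0.5))
# [100.   50.   25.   12.5]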
np.ones([10, 1]),\n \"media_transformed\": np.ones([10, 50, 5,]),\n \"mu\": np.ones([10, 50]),\n \"sigma\": np.ones([10, 1]),\n \"ad_effect_retention_rate\": np.ones([10, 5]),\n \"exponent\": np.ones([10, 5]),\n \"half_max_effective_concentration\": np.ones([10, 5]),\n \"lag_weight\": np.ones([10, 5]),\n \"slope\": np.ones([10, 5]),\n \"peak_effect_delay\": np.ones([10, 5]),\n }\n\nMOCK_GEO_TRACE = {\n \"channel_coef_media\": np.ones([10, 5, 1]),\n \"coef_extra_features\": np.ones([10, 2, 3]),\n \"coef_media\": np.ones([10, 5, 3]),\n \"coef_seasonality\": np.ones([10, 3]),\n \"coef_trend\": np.ones([10, 3]),\n \"expo_trend\": np.ones([10, 1]),\n \"gamma_seasonality\": np.ones([10, 3, 2]),\n \"intercept\": np.ones([10, 3]),\n \"media_transformed\": np.ones([10, 50, 5, 3]),\n \"mu\": np.ones([10, 50, 3]),\n \"sigma\": np.ones([10, 3]),\n \"ad_effect_retention_rate\": np.ones([10, 5]),\n \"exponent\": np.ones([10, 5]),\n \"half_max_effective_concentration\": np.ones([10, 5]),\n \"lag_weight\": np.ones([10, 5]),\n \"peak_effect_delay\": np.ones([10, 5]),\n \"slope\": np.ones([10, 5]),\n}\n\n\ndef _set_up_mock_mmm(model_name: str,\n is_geo_model: bool) -> lightweight_mmm.LightweightMMM:\n \"\"\"Creates a mock LightweightMMM instance that acts like a fitted model.\n\n These instances are used when we want to run tests on more diverse ranges of\n models than the two standard national_mmm and geo_mmm defined below but don't\n need the unit tests to spend time actually running the model fits.\n\n Args:\n model_name: One of [\"adstock\", \"carryover\", or \"hill_adstock\"], specifying\n which model type should be used in the mock LightweightMMM.\n is_geo_model: Whether to create a geo-level model (True) or a national-level\n model (False).\n\n Returns:\n mmm: A LightweightMMM object that can be treated like a fitted model\n for plotting-related unit tests.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/31", "ground_truth": " initial_mock_trace = MOCK_GEO_TRACE if is_geo_model else MOCK_NATIONAL_TRACE\n all_model_names = {\"adstock\", \"carryover\", \"hill_adstock\"}\n model_items_to_delete = frozenset.union(*[\n models.TRANSFORM_PRIORS_NAMES[x]\n for x in all_model_names - {model_name}\n ]) - models.TRANSFORM_PRIORS_NAMES[model_name]\n mock_trace = {\n key: initial_mock_trace[key]\n for key in initial_mock_trace\n if key not in model_items_to_delete\n }\n mmm = lightweight_mmm.LightweightMMM(model_name=model_name)\n mmm.n_media_channels = 5\n mmm.n_geos = 3 if is_geo_model else 1\n mmm._media_prior = jnp.ones(5)\n mmm._weekday_seasonality = False\n mmm._degrees_seasonality = 3\n mmm.custom_priors = {}\n mmm._extra_features = None\n mmm.trace = mock_trace\n mmm.media = jnp.ones_like(mock_trace[\"media_transformed\"][0])\n mmm.media_names = [f\"channel_{i}\" for i in range(5)]\n return mmm\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "plot_test.py"], "context_start_lineno": 0, "lineno": 89, "function_name": "_set_up_mock_mmm"}, "groundtruth": " initial_mock_trace = MOCK_GEO_TRACE if is_geo_model else MOCK_NATIONAL_TRACE\n all_model_names = {\"adstock\", \"carryover\", \"hill_adstock\"}\n model_items_to_delete = frozenset.union(*[\n models.TRANSFORM_PRIORS_NAMES[x]\n for x in all_model_names - {model_name}\n ]) - models.TRANSFORM_PRIORS_NAMES[model_name]\n mock_trace = {\n key: initial_mock_trace[key]\n for key in initial_mock_trace\n if key not in model_items_to_delete\n }\n mmm = lightweight_mmm.LightweightMMM(model_name=model_name)\n mmm.n_media_channels = 5\n 
mmm.n_geos = 3 if is_geo_model else 1\n mmm._media_prior = jnp.ones(5)\n mmm._weekday_seasonality = False\n mmm._degrees_seasonality = 3\n mmm.custom_priors = {}\n mmm._extra_features = None\n mmm.trace = mock_trace\n mmm.media = jnp.ones_like(mock_trace[\"media_transformed\"][0])\n mmm.media_names = [f\"channel_{i}\" for i in range(5)]\n return mmm\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for plot.\"\"\"\n\nfrom unittest import mock\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax.numpy as jnp\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport numpyro.distributions as dist\nimport pandas as pd\n\nfrom lightweight_mmm import lightweight_mmm\nfrom lightweight_mmm import models\nfrom lightweight_mmm import plot\nfrom lightweight_mmm import preprocessing\n\nMOCK_NATIONAL_TRACE = {\n \"coef_extra_features\": np.ones([10, 2]),\n \"coef_media\": np.ones([10, 5]),\n \"coef_trend\": np.ones([10, 1]),\n \"expo_trend\": np.ones([10, 1]),\n \"gamma_seasonality\": np.ones([10, 3, 2]),\n \"intercept\": np.ones([10, 1]),\n \"media_transformed\": np.ones([10, 50, 5,]),\n \"mu\": np.ones([10, 50]),\n \"sigma\": np.ones([10, 1]),\n \"ad_effect_retention_rate\": np.ones([10, 5]),\n \"exponent\": np.ones([10, 5]),\n \"half_max_effective_concentration\": np.ones([10, 5]),\n \"lag_weight\": np.ones([10, 5]),\n \"slope\": np.ones([10, 5]),\n \"peak_effect_delay\": np.ones([10, 5]),\n }\n\nMOCK_GEO_TRACE = {\n \"channel_coef_media\": np.ones([10, 5, 1]),\n \"coef_extra_features\": np.ones([10, 2, 3]),\n \"coef_media\": np.ones([10, 5, 3]),\n \"coef_seasonality\": np.ones([10, 3]),\n \"coef_trend\": np.ones([10, 3]),\n \"expo_trend\": np.ones([10, 1]),\n \"gamma_seasonality\": np.ones([10, 3, 2]),\n \"intercept\": np.ones([10, 3]),\n \"media_transformed\": np.ones([10, 50, 5, 3]),\n \"mu\": np.ones([10, 50, 3]),\n \"sigma\": np.ones([10, 3]),\n \"ad_effect_retention_rate\": np.ones([10, 5]),\n \"exponent\": np.ones([10, 5]),\n \"half_max_effective_concentration\": np.ones([10, 5]),\n \"lag_weight\": np.ones([10, 5]),\n \"peak_effect_delay\": np.ones([10, 5]),\n \"slope\": np.ones([10, 5]),\n}\n\n\ndef _set_up_mock_mmm(model_name: str,\n is_geo_model: bool) -> lightweight_mmm.LightweightMMM:\n \"\"\"Creates a mock LightweightMMM instance that acts like a fitted model.\n\n These instances are used when we want to run tests on more diverse ranges of\n models than the two standard national_mmm and geo_mmm defined below but don't\n need the unit tests to spend time actually running the model fits.\n\n Args:\n model_name: One of [\"adstock\", \"carryover\", or \"hill_adstock\"], specifying\n which model type should be used in the mock LightweightMMM.\n is_geo_model: Whether to create a geo-level model (True) or a national-level\n model (False).\n\n Returns:\n mmm: A LightweightMMM object that can be treated like a fitted model\n for plotting-related unit tests.\n \"\"\"\n 
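The least obvious part of the mock above is the frozenset arithmetic that removes trace entries belonging to the *other* transform types while keeping any name shared with the requested one (note "exponent" is shared by adstock and carryover, so it survives). Isolated with the same prior-name tables:

priors = {
    "adstock": frozenset({"exponent", "lag_weight"}),
    "carryover": frozenset({"ad_effect_retention_rate", "peak_effect_delay", "exponent"}),
    "hill_adstock": frozenset({"lag_weight", "half_max_effective_concentration", "slope"}),
}
model_name = "adstock"
others = set(priors) - {model_name}
to_delete = frozenset.union(*[priors[m] for m in others]) - priors[model_name]
print(sorted(to_delete))
# ['ad_effect_retention_rate', 'half_max_effective_concentration',
#  'peak_effect_delay', 'slope']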
initial_mock_trace = MOCK_GEO_TRACE if is_geo_model else MOCK_NATIONAL_TRACE\n all_model_names = {\"adstock\", \"carryover\", \"hill_adstock\"}\n model_items_to_delete = frozenset.union(*[\n models.TRANSFORM_PRIORS_NAMES[x]\n for x in all_model_names - {model_name}\n ]) - models.TRANSFORM_PRIORS_NAMES[model_name]\n mock_trace = {\n key: initial_mock_trace[key]\n for key in initial_mock_trace\n if key not in model_items_to_delete\n }\n mmm = lightweight_mmm.LightweightMMM(model_name=model_name)\n mmm.n_media_channels = 5\n mmm.n_geos = 3 if is_geo_model else 1\n mmm._media_prior = jnp.ones(5)\n mmm._weekday_seasonality = False\n mmm._degrees_seasonality = 3\n mmm.custom_priors = {}\n mmm._extra_features = None\n mmm.trace = mock_trace\n mmm.media = jnp.ones_like(mock_trace[\"media_transformed\"][0])\n mmm.media_names = [f\"channel_{i}\" for i in range(5)]\n return mmm\n\n\nclass PlotTest(parameterized.TestCase):\n\n @classmethod\n def setUpClass(cls):", "metadata": {"task_id": "google--lightweight_mmm/32", "ground_truth": " super(PlotTest, cls).setUpClass()\n cls.national_mmm = lightweight_mmm.LightweightMMM()\n cls.national_mmm.fit(\n media=jnp.ones((50, 5)),\n target=jnp.ones(50),\n media_prior=jnp.ones(5) * 50,\n number_warmup=2,\n number_samples=2,\n number_chains=1)\n cls.geo_mmm = lightweight_mmm.LightweightMMM()\n cls.geo_mmm.fit(\n media=jnp.ones((50, 5, 3)),\n target=jnp.ones((50, 3)),\n media_prior=jnp.ones(5) * 50,\n number_warmup=2,\n number_samples=2,\n number_chains=1)\n cls.not_fitted_mmm = lightweight_mmm.LightweightMMM()\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "plot_test.py"], "context_start_lineno": 0, "lineno": 118, "function_name": "setUpClass"}, "groundtruth": " super(PlotTest, cls).setUpClass()\n cls.national_mmm = lightweight_mmm.LightweightMMM()\n cls.national_mmm.fit(\n media=jnp.ones((50, 5)),\n target=jnp.ones(50),\n media_prior=jnp.ones(5) * 50,\n number_warmup=2,\n number_samples=2,\n number_chains=1)\n cls.geo_mmm = lightweight_mmm.LightweightMMM()\n cls.geo_mmm.fit(\n media=jnp.ones((50, 5, 3)),\n target=jnp.ones((50, 3)),\n media_prior=jnp.ones(5) * 50,\n number_warmup=2,\n number_samples=2,\n number_chains=1)\n cls.not_fitted_mmm = lightweight_mmm.LightweightMMM()\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for plot.\"\"\"\n\nfrom unittest import mock\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax.numpy as jnp\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport numpyro.distributions as dist\nimport pandas as pd\n\nfrom lightweight_mmm import lightweight_mmm\nfrom lightweight_mmm import models\nfrom lightweight_mmm import plot\nfrom lightweight_mmm import preprocessing\n\nMOCK_NATIONAL_TRACE = {\n \"coef_extra_features\": np.ones([10, 2]),\n \"coef_media\": np.ones([10, 5]),\n \"coef_trend\": np.ones([10, 1]),\n \"expo_trend\": np.ones([10, 1]),\n \"gamma_seasonality\": np.ones([10, 3, 2]),\n 
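The fixture above runs a real NUTS fit but keeps it trivially cheap (2 warmup draws, 2 posterior samples, 1 chain) so the suite stays fast. Assuming `lightweight_mmm` is installed, the same tiny fit can be reproduced standalone to inspect the trace, whose leading axis is number_samples * number_chains:

import jax.numpy as jnp
from lightweight_mmm import lightweight_mmm

mmm = lightweight_mmm.LightweightMMM()
mmm.fit(media=jnp.ones((50, 5)), target=jnp.ones(50),
        media_prior=jnp.ones(5) * 50,
        number_warmup=2, number_samples=2, number_chains=1)
for name, samples in mmm.trace.items():
    print(name, samples.shape)  # e.g. coef_media has leading axis 2 here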
\"intercept\": np.ones([10, 1]),\n \"media_transformed\": np.ones([10, 50, 5,]),\n \"mu\": np.ones([10, 50]),\n \"sigma\": np.ones([10, 1]),\n \"ad_effect_retention_rate\": np.ones([10, 5]),\n \"exponent\": np.ones([10, 5]),\n \"half_max_effective_concentration\": np.ones([10, 5]),\n \"lag_weight\": np.ones([10, 5]),\n \"slope\": np.ones([10, 5]),\n \"peak_effect_delay\": np.ones([10, 5]),\n }\n\nMOCK_GEO_TRACE = {\n \"channel_coef_media\": np.ones([10, 5, 1]),\n \"coef_extra_features\": np.ones([10, 2, 3]),\n \"coef_media\": np.ones([10, 5, 3]),\n \"coef_seasonality\": np.ones([10, 3]),\n \"coef_trend\": np.ones([10, 3]),\n \"expo_trend\": np.ones([10, 1]),\n \"gamma_seasonality\": np.ones([10, 3, 2]),\n \"intercept\": np.ones([10, 3]),\n \"media_transformed\": np.ones([10, 50, 5, 3]),\n \"mu\": np.ones([10, 50, 3]),\n \"sigma\": np.ones([10, 3]),\n \"ad_effect_retention_rate\": np.ones([10, 5]),\n \"exponent\": np.ones([10, 5]),\n \"half_max_effective_concentration\": np.ones([10, 5]),\n \"lag_weight\": np.ones([10, 5]),\n \"peak_effect_delay\": np.ones([10, 5]),\n \"slope\": np.ones([10, 5]),\n}\n\n\ndef _set_up_mock_mmm(model_name: str,\n is_geo_model: bool) -> lightweight_mmm.LightweightMMM:\n \"\"\"Creates a mock LightweightMMM instance that acts like a fitted model.\n\n These instances are used when we want to run tests on more diverse ranges of\n models than the two standard national_mmm and geo_mmm defined below but don't\n need the unit tests to spend time actually running the model fits.\n\n Args:\n model_name: One of [\"adstock\", \"carryover\", or \"hill_adstock\"], specifying\n which model type should be used in the mock LightweightMMM.\n is_geo_model: Whether to create a geo-level model (True) or a national-level\n model (False).\n\n Returns:\n mmm: A LightweightMMM object that can be treated like a fitted model\n for plotting-related unit tests.\n \"\"\"\n initial_mock_trace = MOCK_GEO_TRACE if is_geo_model else MOCK_NATIONAL_TRACE\n all_model_names = {\"adstock\", \"carryover\", \"hill_adstock\"}\n model_items_to_delete = frozenset.union(*[\n models.TRANSFORM_PRIORS_NAMES[x]\n for x in all_model_names - {model_name}\n ]) - models.TRANSFORM_PRIORS_NAMES[model_name]\n mock_trace = {\n key: initial_mock_trace[key]\n for key in initial_mock_trace\n if key not in model_items_to_delete\n }\n mmm = lightweight_mmm.LightweightMMM(model_name=model_name)\n mmm.n_media_channels = 5\n mmm.n_geos = 3 if is_geo_model else 1\n mmm._media_prior = jnp.ones(5)\n mmm._weekday_seasonality = False\n mmm._degrees_seasonality = 3\n mmm.custom_priors = {}\n mmm._extra_features = None\n mmm.trace = mock_trace\n mmm.media = jnp.ones_like(mock_trace[\"media_transformed\"][0])\n mmm.media_names = [f\"channel_{i}\" for i in range(5)]\n return mmm\n\n\nclass PlotTest(parameterized.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(PlotTest, cls).setUpClass()\n cls.national_mmm = lightweight_mmm.LightweightMMM()\n cls.national_mmm.fit(\n media=jnp.ones((50, 5)),\n target=jnp.ones(50),\n media_prior=jnp.ones(5) * 50,\n number_warmup=2,\n number_samples=2,\n number_chains=1)\n cls.geo_mmm = lightweight_mmm.LightweightMMM()\n cls.geo_mmm.fit(\n media=jnp.ones((50, 5, 3)),\n target=jnp.ones((50, 3)),\n media_prior=jnp.ones(5) * 50,\n number_warmup=2,\n number_samples=2,\n number_chains=1)\n cls.not_fitted_mmm = lightweight_mmm.LightweightMMM()\n\n def setUp(self):", "metadata": {"task_id": "google--lightweight_mmm/33", "ground_truth": " super().setUp()\n self.mock_ax_scatter = self.enter_context(\n 
mock.patch.object(plot.plt.Axes, \"scatter\", autospec=True))\n self.mock_sns_lineplot = self.enter_context(\n mock.patch.object(plot.sns, \"lineplot\", autospec=True))\n self.mock_plt_plot = self.enter_context(\n mock.patch.object(plot.plt.Axes, \"plot\", autospec=True))\n self.mock_plt_barplot = self.enter_context(\n mock.patch.object(plot.plt.Axes, \"bar\", autospec=True))\n self.mock_pd_area_plot = self.enter_context(\n mock.patch.object(plot.pd.DataFrame.plot, \"area\", autospec=True))\n self.mock_sns_kdeplot = self.enter_context(\n mock.patch.object(plot.sns, \"kdeplot\", autospec=True))\n self.mock_plt_ax_legend = self.enter_context(\n mock.patch.object(plot.plt.Axes, \"legend\", autospec=True))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "plot_test.py"], "context_start_lineno": 0, "lineno": 138, "function_name": "setUp"}, "groundtruth": " super().setUp()\n self.mock_ax_scatter = self.enter_context(\n mock.patch.object(plot.plt.Axes, \"scatter\", autospec=True))\n self.mock_sns_lineplot = self.enter_context(\n mock.patch.object(plot.sns, \"lineplot\", autospec=True))\n self.mock_plt_plot = self.enter_context(\n mock.patch.object(plot.plt.Axes, \"plot\", autospec=True))\n self.mock_plt_barplot = self.enter_context(\n mock.patch.object(plot.plt.Axes, \"bar\", autospec=True))\n self.mock_pd_area_plot = self.enter_context(\n mock.patch.object(plot.pd.DataFrame.plot, \"area\", autospec=True))\n self.mock_sns_kdeplot = self.enter_context(\n mock.patch.object(plot.sns, \"kdeplot\", autospec=True))\n self.mock_plt_ax_legend = self.enter_context(\n mock.patch.object(plot.plt.Axes, \"legend\", autospec=True))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for optimizing your media based on media mix models.\"\"\"\nimport functools\nfrom typing import Optional, Tuple, Union\nfrom absl import logging\nimport jax\nimport jax.numpy as jnp\nfrom scipy import optimize\n\nfrom lightweight_mmm import lightweight_mmm\nfrom lightweight_mmm import preprocessing\n\n\n@functools.partial(\n jax.jit,\n static_argnames=(\"media_mix_model\", \"media_input_shape\", \"target_scaler\",\n \"media_scaler\"))\ndef _objective_function(extra_features: jnp.ndarray,\n media_mix_model: lightweight_mmm.LightweightMMM,\n media_input_shape: Tuple[int,\n int], media_gap: Optional[int],\n target_scaler: Optional[preprocessing.CustomScaler],\n media_scaler: preprocessing.CustomScaler,\n geo_ratio: jnp.array,\n seed: Optional[int],\n media_values: jnp.ndarray) -> jnp.float64:\n \"\"\"Objective function to calculate the sum of all predictions of the model.\n\n Args:\n extra_features: Extra features the model requires for prediction.\n media_mix_model: Media mix model to use. Must have a predict method to be\n used.\n media_input_shape: Input shape of the data required by the model to get\n predictions. 
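Every patch in the setUp above goes through `self.enter_context(mock.patch.object(..., autospec=True))`, so it is torn down automatically when the test ends and calls are checked against the real signatures. The same pattern on a toy class, for reference:

from unittest import mock

class Greeter:
    def greet(self, name: str) -> str:
        return f"hello {name}"

# autospec=True makes the mock reject calls that don't match the real signature.
with mock.patch.object(Greeter, "greet", autospec=True) as fake_greet:
    Greeter().greet("world")        # recorded; the real method never runs
    fake_greet.assert_called_once()

print(Greeter().greet("world"))     # patch undone on exit: "hello world"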
This is needed since optimization might flatten some arrays\n and they need to be reshaped before running new predictions.\n media_gap: Media data gap between the end of training data and the start of\n the out of sample media given. Eg. if 100 weeks of data were used for\n training and prediction starts 2 months after training data finished we\n need to provide the 8 weeks missing between the training data and the\n prediction data so data transformations (adstock, carryover, ...) can take\n place correctly.\n target_scaler: Scaler that was used to scale the target before training.\n media_scaler: Scaler that was used to scale the media data before training.\n geo_ratio: The ratio to split channel media across geo. Should sum up to 1\n for each channel and should have shape (c, g).\n seed: Seed to use for PRNGKey during sampling. For replicability run\n this function and any other function that gets predictions with the same\n seed.\n media_values: Media values required by the model to run predictions.\n\n Returns:\n The negative value of the sum of all predictions.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/34", "ground_truth": " if hasattr(media_mix_model, \"n_geos\") and media_mix_model.n_geos > 1:\n media_values = geo_ratio * jnp.expand_dims(media_values, axis=-1)\n media_values = jnp.tile(\n media_values / media_input_shape[0], reps=media_input_shape[0])\n # Distribute budget of each channels across time.\n media_values = jnp.reshape(a=media_values, newshape=media_input_shape)\n media_values = media_scaler.transform(media_values)\n return -jnp.sum(\n media_mix_model.predict(\n media=media_values.reshape(media_input_shape),\n extra_features=extra_features,\n media_gap=media_gap,\n target_scaler=target_scaler,\n seed=seed).mean(axis=0))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "optimize_media.py"], "context_start_lineno": 0, "lineno": 66, "function_name": "_objective_function"}, "groundtruth": " if hasattr(media_mix_model, \"n_geos\") and media_mix_model.n_geos > 1:\n media_values = geo_ratio * jnp.expand_dims(media_values, axis=-1)\n media_values = jnp.tile(\n media_values / media_input_shape[0], reps=media_input_shape[0])\n # Distribute budget of each channels across time.\n media_values = jnp.reshape(a=media_values, newshape=media_input_shape)\n media_values = media_scaler.transform(media_values)\n return -jnp.sum(\n media_mix_model.predict(\n media=media_values.reshape(media_input_shape),\n extra_features=extra_features,\n media_gap=media_gap,\n target_scaler=target_scaler,\n seed=seed).mean(axis=0))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for optimizing your media based on media mix models.\"\"\"\nimport functools\nfrom typing import Optional, Tuple, Union\nfrom absl import logging\nimport jax\nimport jax.numpy as jnp\nfrom scipy import optimize\n\nfrom lightweight_mmm import lightweight_mmm\nfrom lightweight_mmm import preprocessing\n\n\n@functools.partial(\n 
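A toy version of the reshaping step in the ground truth above: the optimizer proposes one total per channel, which is spread evenly across the time periods before being scaled and fed to `predict` (the numbers here are made up):

import numpy as np

n_time_periods, n_channels = 4, 2
totals = np.array([40., 8.])  # per-channel totals proposed by the optimizer

# Same tile-then-reshape trick as in _objective_function.
per_period = np.tile(totals / n_time_periods, reps=n_time_periods)
per_period = per_period.reshape((n_time_periods, n_channels))
print(per_period)
# [[10.  2.]
#  [10.  2.]
#  [10.  2.]
#  [10.  2.]]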
jax.jit,\n static_argnames=(\"media_mix_model\", \"media_input_shape\", \"target_scaler\",\n \"media_scaler\"))\ndef _objective_function(extra_features: jnp.ndarray,\n media_mix_model: lightweight_mmm.LightweightMMM,\n media_input_shape: Tuple[int,\n int], media_gap: Optional[int],\n target_scaler: Optional[preprocessing.CustomScaler],\n media_scaler: preprocessing.CustomScaler,\n geo_ratio: jnp.array,\n seed: Optional[int],\n media_values: jnp.ndarray) -> jnp.float64:\n \"\"\"Objective function to calculate the sum of all predictions of the model.\n\n Args:\n extra_features: Extra features the model requires for prediction.\n media_mix_model: Media mix model to use. Must have a predict method to be\n used.\n media_input_shape: Input shape of the data required by the model to get\n predictions. This is needed since optimization might flatten some arrays\n and they need to be reshaped before running new predictions.\n media_gap: Media data gap between the end of training data and the start of\n the out of sample media given. Eg. if 100 weeks of data were used for\n training and prediction starts 2 months after training data finished we\n need to provide the 8 weeks missing between the training data and the\n prediction data so data transformations (adstock, carryover, ...) can take\n place correctly.\n target_scaler: Scaler that was used to scale the target before training.\n media_scaler: Scaler that was used to scale the media data before training.\n geo_ratio: The ratio to split channel media across geo. Should sum up to 1\n for each channel and should have shape (c, g).\n seed: Seed to use for PRNGKey during sampling. For replicability run\n this function and any other function that gets predictions with the same\n seed.\n media_values: Media values required by the model to run predictions.\n\n Returns:\n The negative value of the sum of all predictions.\n \"\"\"\n if hasattr(media_mix_model, \"n_geos\") and media_mix_model.n_geos > 1:\n media_values = geo_ratio * jnp.expand_dims(media_values, axis=-1)\n media_values = jnp.tile(\n media_values / media_input_shape[0], reps=media_input_shape[0])\n # Distribute budget of each channels across time.\n media_values = jnp.reshape(a=media_values, newshape=media_input_shape)\n media_values = media_scaler.transform(media_values)\n return -jnp.sum(\n media_mix_model.predict(\n media=media_values.reshape(media_input_shape),\n extra_features=extra_features,\n media_gap=media_gap,\n target_scaler=target_scaler,\n seed=seed).mean(axis=0))\n\n\n@jax.jit\ndef _budget_constraint(media: jnp.ndarray,\n prices: jnp.ndarray,\n budget: jnp.ndarray) -> jnp.float64:\n \"\"\"Calculates optimization constraint to keep spend equal to the budget.\n\n Args:\n media: Array with the values of the media for this iteration.\n prices: Prices of each media channel at any given time.\n budget: Total budget of the optimization.\n\n Returns:\n The result from substracting the total spending and the budget.\n \"\"\"\n media = media.reshape((-1, len(prices)))\n return jnp.sum(media * prices) - budget\n\n\ndef _get_lower_and_upper_bounds(\n media: jnp.ndarray,\n n_time_periods: int,\n lower_pct: jnp.ndarray,\n upper_pct: jnp.ndarray,\n media_scaler: Optional[preprocessing.CustomScaler] = None\n) -> optimize.Bounds:\n \"\"\"Gets the lower and upper bounds for optimisation based on historic data.\n\n It creates an upper bound based on a percentage above the mean value on\n each channel and a lower bound based on a relative decrease of the mean\n value.\n\n Args:\n media: Media 
data to get historic mean.\n n_time_periods: Number of time periods to optimize for. If model is built on\n weekly data, this would be the number of weeks ahead to optimize.\n lower_pct: Relative percentage decrease from the mean value to consider as\n new lower bound.\n upper_pct: Relative percentage increase from the mean value to consider as\n new upper bound.\n media_scaler: Scaler that was used to scale the media data before training.\n\n Returns:\n A list of tuples with the lower and upper bound for each media channel.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/35", "ground_truth": " if media.ndim == 3:\n lower_pct = jnp.expand_dims(lower_pct, axis=-1)\n upper_pct = jnp.expand_dims(upper_pct, axis=-1)\n\n mean_data = media.mean(axis=0)\n lower_bounds = jnp.maximum(mean_data * (1 - lower_pct), 0)\n upper_bounds = mean_data * (1 + upper_pct)\n\n if media_scaler:\n lower_bounds = media_scaler.inverse_transform(lower_bounds)\n upper_bounds = media_scaler.inverse_transform(upper_bounds)\n\n if media.ndim == 3:\n lower_bounds = lower_bounds.sum(axis=-1)\n upper_bounds = upper_bounds.sum(axis=-1)\n\n return optimize.Bounds(lb=lower_bounds * n_time_periods,\n ub=upper_bounds * n_time_periods)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "optimize_media.py"], "context_start_lineno": 0, "lineno": 126, "function_name": "_get_lower_and_upper_bounds"}, "groundtruth": " if media.ndim == 3:\n lower_pct = jnp.expand_dims(lower_pct, axis=-1)\n upper_pct = jnp.expand_dims(upper_pct, axis=-1)\n\n mean_data = media.mean(axis=0)\n lower_bounds = jnp.maximum(mean_data * (1 - lower_pct), 0)\n upper_bounds = mean_data * (1 + upper_pct)\n\n if media_scaler:\n lower_bounds = media_scaler.inverse_transform(lower_bounds)\n upper_bounds = media_scaler.inverse_transform(upper_bounds)\n\n if media.ndim == 3:\n lower_bounds = lower_bounds.sum(axis=-1)\n upper_bounds = upper_bounds.sum(axis=-1)\n\n return optimize.Bounds(lb=lower_bounds * n_time_periods,\n ub=upper_bounds * n_time_periods)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for optimizing your media based on media mix models.\"\"\"\nimport functools\nfrom typing import Optional, Tuple, Union\nfrom absl import logging\nimport jax\nimport jax.numpy as jnp\nfrom scipy import optimize\n\nfrom lightweight_mmm import lightweight_mmm\nfrom lightweight_mmm import preprocessing\n\n\n@functools.partial(\n jax.jit,\n static_argnames=(\"media_mix_model\", \"media_input_shape\", \"target_scaler\",\n \"media_scaler\"))\ndef _objective_function(extra_features: jnp.ndarray,\n media_mix_model: lightweight_mmm.LightweightMMM,\n media_input_shape: Tuple[int,\n int], media_gap: Optional[int],\n target_scaler: Optional[preprocessing.CustomScaler],\n media_scaler: preprocessing.CustomScaler,\n geo_ratio: jnp.array,\n seed: Optional[int],\n media_values: jnp.ndarray) -> jnp.float64:\n \"\"\"Objective function to calculate the sum of all predictions of the model.\n\n Args:\n 
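Numerically, the bounds above are just each channel's historical mean scaled down and up by the given percentages, then multiplied by the optimization horizon. A standalone sketch with made-up data:

import numpy as np
from scipy import optimize

media = np.array([[10., 100.],
                  [20., 300.],
                  [30., 200.]])   # (time, channels)
lower_pct = np.array([0.2, 0.5])
upper_pct = np.array([0.2, 0.5])
n_time_periods = 2

mean_data = media.mean(axis=0)    # [ 20. 200.]
bounds = optimize.Bounds(
    lb=np.maximum(mean_data * (1 - lower_pct), 0) * n_time_periods,
    ub=mean_data * (1 + upper_pct) * n_time_periods)
print(bounds.lb, bounds.ub)       # [ 32. 200.] [ 48. 600.]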
extra_features: Extra features the model requires for prediction.\n media_mix_model: Media mix model to use. Must have a predict method to be\n used.\n media_input_shape: Input shape of the data required by the model to get\n predictions. This is needed since optimization might flatten some arrays\n and they need to be reshaped before running new predictions.\n media_gap: Media data gap between the end of training data and the start of\n the out of sample media given. Eg. if 100 weeks of data were used for\n training and prediction starts 2 months after training data finished we\n need to provide the 8 weeks missing between the training data and the\n prediction data so data transformations (adstock, carryover, ...) can take\n place correctly.\n target_scaler: Scaler that was used to scale the target before training.\n media_scaler: Scaler that was used to scale the media data before training.\n geo_ratio: The ratio to split channel media across geo. Should sum up to 1\n for each channel and should have shape (c, g).\n seed: Seed to use for PRNGKey during sampling. For replicability run\n this function and any other function that gets predictions with the same\n seed.\n media_values: Media values required by the model to run predictions.\n\n Returns:\n The negative value of the sum of all predictions.\n \"\"\"\n if hasattr(media_mix_model, \"n_geos\") and media_mix_model.n_geos > 1:\n media_values = geo_ratio * jnp.expand_dims(media_values, axis=-1)\n media_values = jnp.tile(\n media_values / media_input_shape[0], reps=media_input_shape[0])\n # Distribute budget of each channels across time.\n media_values = jnp.reshape(a=media_values, newshape=media_input_shape)\n media_values = media_scaler.transform(media_values)\n return -jnp.sum(\n media_mix_model.predict(\n media=media_values.reshape(media_input_shape),\n extra_features=extra_features,\n media_gap=media_gap,\n target_scaler=target_scaler,\n seed=seed).mean(axis=0))\n\n\n@jax.jit\ndef _budget_constraint(media: jnp.ndarray,\n prices: jnp.ndarray,\n budget: jnp.ndarray) -> jnp.float64:\n \"\"\"Calculates optimization constraint to keep spend equal to the budget.\n\n Args:\n media: Array with the values of the media for this iteration.\n prices: Prices of each media channel at any given time.\n budget: Total budget of the optimization.\n\n Returns:\n The result from substracting the total spending and the budget.\n \"\"\"\n media = media.reshape((-1, len(prices)))\n return jnp.sum(media * prices) - budget\n\n\ndef _get_lower_and_upper_bounds(\n media: jnp.ndarray,\n n_time_periods: int,\n lower_pct: jnp.ndarray,\n upper_pct: jnp.ndarray,\n media_scaler: Optional[preprocessing.CustomScaler] = None\n) -> optimize.Bounds:\n \"\"\"Gets the lower and upper bounds for optimisation based on historic data.\n\n It creates an upper bound based on a percentage above the mean value on\n each channel and a lower bound based on a relative decrease of the mean\n value.\n\n Args:\n media: Media data to get historic mean.\n n_time_periods: Number of time periods to optimize for. 
If model is built on\n weekly data, this would be the number of weeks ahead to optimize.\n lower_pct: Relative percentage decrease from the mean value to consider as\n new lower bound.\n upper_pct: Relative percentage increase from the mean value to consider as\n new upper bound.\n media_scaler: Scaler that was used to scale the media data before training.\n\n Returns:\n A list of tuples with the lower and upper bound for each media channel.\n \"\"\"\n if media.ndim == 3:\n lower_pct = jnp.expand_dims(lower_pct, axis=-1)\n upper_pct = jnp.expand_dims(upper_pct, axis=-1)\n\n mean_data = media.mean(axis=0)\n lower_bounds = jnp.maximum(mean_data * (1 - lower_pct), 0)\n upper_bounds = mean_data * (1 + upper_pct)\n\n if media_scaler:\n lower_bounds = media_scaler.inverse_transform(lower_bounds)\n upper_bounds = media_scaler.inverse_transform(upper_bounds)\n\n if media.ndim == 3:\n lower_bounds = lower_bounds.sum(axis=-1)\n upper_bounds = upper_bounds.sum(axis=-1)\n\n return optimize.Bounds(lb=lower_bounds * n_time_periods,\n ub=upper_bounds * n_time_periods)\n\n\ndef _generate_starting_values(\n n_time_periods: int, media: jnp.ndarray,\n media_scaler: preprocessing.CustomScaler,\n budget: Union[float, int],\n prices: jnp.ndarray,\n) -> jnp.ndarray:\n \"\"\"Generates starting values based on historic allocation and budget.\n\n In order to make a comparison we can take the allocation of the last\n `n_time_periods` and scale it based on the given budget. Given this, one can\n compare how this initial values (based on average historic allocation) compare\n to the output of the optimisation in terms of sales/KPI.\n\n Args:\n n_time_periods: Number of time periods the optimization will be done with.\n media: Historic media data the model was trained with.\n media_scaler: Scaler that was used to scale the media data before training.\n budget: Total budget to allocate during the optimization time.\n prices: An array with shape (n_media_channels,) for the cost of each media\n channel unit.\n\n Returns:\n An array with the starting value for each media channel for the\n optimization.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/36", "ground_truth": " previous_allocation = media.mean(axis=0) * n_time_periods\n if media_scaler: # Scale before sum as geo scaler has shape (c, g).\n previous_allocation = media_scaler.inverse_transform(previous_allocation)\n\n if media.ndim == 3:\n previous_allocation = previous_allocation.sum(axis=-1)\n\n avg_spend_per_channel = previous_allocation * prices\n pct_spend_per_channel = avg_spend_per_channel / avg_spend_per_channel.sum()\n budget_per_channel = budget * pct_spend_per_channel\n media_unit_per_channel = budget_per_channel / prices\n return media_unit_per_channel\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "optimize_media.py"], "context_start_lineno": 0, "lineno": 171, "function_name": "_generate_starting_values"}, "groundtruth": " previous_allocation = media.mean(axis=0) * n_time_periods\n if media_scaler: # Scale before sum as geo scaler has shape (c, g).\n previous_allocation = media_scaler.inverse_transform(previous_allocation)\n\n if media.ndim == 3:\n previous_allocation = previous_allocation.sum(axis=-1)\n\n avg_spend_per_channel = previous_allocation * prices\n pct_spend_per_channel = avg_spend_per_channel / avg_spend_per_channel.sum()\n budget_per_channel = budget * pct_spend_per_channel\n media_unit_per_channel = budget_per_channel / prices\n return media_unit_per_channel\n"} +{"prompt": "# Copyright 2023 Google 
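Worked through with toy numbers, the starting values above split the budget in proportion to historical spend share and then convert back to media units, so more expensive channels receive fewer units for the same spend:

import numpy as np

prices = np.array([1., 2.])                 # cost per media unit
previous_allocation = np.array([30., 10.])  # historic units over the horizon
budget = 100.

avg_spend = previous_allocation * prices    # [30. 20.]
pct_spend = avg_spend / avg_spend.sum()     # [0.6 0.4]
starting_units = budget * pct_spend / prices
print(starting_units)                       # [60. 20.]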
LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for preprocessing dataset for training LightweightMMM.\"\"\"\n\nimport copy\nfrom typing import Callable, List, Optional, Sequence, Tuple, Union\n\nimport jax.numpy as jnp\nimport pandas as pd\nfrom sklearn import base\n\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\nfrom statsmodels.tools.tools import add_constant\nfrom lightweight_mmm.core import core_utils\n\n\nclass NotFittedScalerError(Exception):\n pass\n\n\nclass CustomScaler(base.TransformerMixin):\n \"\"\"Class to scale your data based on multiplications and divisions.\n\n This scaler can be used in two fashions for both the multiplication and\n division operation.\n - By specifying a value to use for the scaling operation.\n - By specifying an operation used at column level to calculate the value\n for the actual scaling operation.\n\n Eg. if one wants to scale the dataset by multiply by 100 you can directly\n pass multiply_by=100. Value can also be an array with as many values\n as column has the data being scaled. But if you want to multiply by the mean\n value of each column, then you can pass multiply_operation=jnp.mean (or any\n other operation desired).\n\n Operation parameters have the upper hand in the cases where both values and\n operations are passed, values will be ignored in this case.\n\n Scaler must be fit first in order to call the transform method.\n\n Attributes.\n divide_operation: Operation to apply over axis 0 of the fitting data to\n obtain the value that will be used for division during scaling.\n divide_by: Numbers(s) by which to divide data in the scaling process. Since\n the scaler is applied to axis 0 of the data, the shape of divide_by must\n be consistent with division into the data. For example, if data.shape =\n (100, 3, 5) then divide_by.shape can be (3, 5) or (5,) or a number. If\n divide_operation is given, this divide_by value will be ignored.\n multiply_operation: Operation to apply over axis 0 of the fitting data to\n obtain the value that will be used for multiplication during scaling.\n multiply_by: Numbers(s) by which to multiply data in the scaling process.\n Since the scaler is applied to axis 0 of the data, the shape of\n multiply_by must be consistent with multiplication into the data. For\n example, if data.shape = (100, 3, 5) then multiply_by.shape can be (3, 5)\n or (5,) or a number. If multiply_operation is given, this multiply_by\n value will be ignored.\n \"\"\"\n\n def __init__(\n self,\n divide_operation: Optional[Callable[[jnp.ndarray], jnp.float32]] = None,\n divide_by: Optional[Union[float, int, jnp.ndarray]] = 1,\n multiply_operation: Optional[Callable[[jnp.ndarray], jnp.float32]] = None,\n multiply_by: Optional[Union[float, int, jnp.ndarray]] = 1.) 
-> None:\n \"\"\"Constructor for the CustomScaler class.\"\"\"\n if all([\n divide_by is None, divide_operation is None, multiply_by is None,\n multiply_operation is None\n ]):\n raise ValueError(\"No values for transformations were provided and this \"\n \"scaler will fail. Please instantiate a valid one\")\n\n if divide_operation is None and divide_by is None:\n raise ValueError(\n \"Either a division operation or value needs to be passed. If \"\n \"you dont want to use a division to scale your data just \"\n \"pass divide_by=1.\")\n elif divide_operation is not None:\n self.divide_operation = divide_operation\n else:\n self.divide_by = divide_by\n\n if multiply_operation is None and multiply_by is None:\n raise ValueError(\n \"Either a multiplication operation or value needs to be passed. If \"\n \"you dont want to use a multiplication to scale your data just \"\n \"pass multiply_by=1.\")\n elif multiply_operation is not None:\n self.multiply_operation = multiply_operation\n else:\n self.multiply_by = multiply_by\n\n def fit(self, data: jnp.ndarray) -> None:\n \"\"\"Figures out values for transformations based on the specified operations.\n\n Args:\n data: Input dataset to use for fitting.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/37", "ground_truth": " if hasattr(self, \"divide_operation\"):\n self.divide_by = jnp.apply_along_axis(\n func1d=self.divide_operation, axis=0, arr=data)\n elif isinstance(self.divide_by, int) or isinstance(self.divide_by, float):\n self.divide_by = self.divide_by * jnp.ones(data.shape[1:])\n if hasattr(self, \"multiply_operation\"):\n self.multiply_by = jnp.apply_along_axis(\n func1d=self.multiply_operation, axis=0, arr=data)\n elif isinstance(self.multiply_by, int) or isinstance(\n self.multiply_by, float):\n self.multiply_by = self.multiply_by * jnp.ones(data.shape[1:])\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "preprocessing.py"], "context_start_lineno": 0, "lineno": 110, "function_name": "fit"}, "groundtruth": " if hasattr(self, \"divide_operation\"):\n self.divide_by = jnp.apply_along_axis(\n func1d=self.divide_operation, axis=0, arr=data)\n elif isinstance(self.divide_by, int) or isinstance(self.divide_by, float):\n self.divide_by = self.divide_by * jnp.ones(data.shape[1:])\n if hasattr(self, \"multiply_operation\"):\n self.multiply_by = jnp.apply_along_axis(\n func1d=self.multiply_operation, axis=0, arr=data)\n elif isinstance(self.multiply_by, int) or isinstance(\n self.multiply_by, float):\n self.multiply_by = self.multiply_by * jnp.ones(data.shape[1:])\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for preprocessing dataset for training LightweightMMM.\"\"\"\n\nimport copy\nfrom typing import Callable, List, Optional, Sequence, Tuple, Union\n\nimport jax.numpy as jnp\nimport pandas as pd\nfrom sklearn import base\n\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\nfrom statsmodels.tools.tools import add_constant\nfrom 
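Assuming the package is importable, the fit logic above can be exercised directly; with `divide_operation=jnp.mean` the fitted `divide_by` becomes the per-column mean, so each column is rescaled around 1:

import jax.numpy as jnp
from lightweight_mmm import preprocessing

data = jnp.array([[1., 10.],
                  [3., 30.]])
scaler = preprocessing.CustomScaler(divide_operation=jnp.mean)
print(scaler.fit_transform(data))  # column means are [2., 20.]
# [[0.5 0.5]
#  [1.5 1.5]]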
lightweight_mmm.core import core_utils\n\n\nclass NotFittedScalerError(Exception):\n pass\n\n\nclass CustomScaler(base.TransformerMixin):\n \"\"\"Class to scale your data based on multiplications and divisions.\n\n This scaler can be used in two fashions for both the multiplication and\n division operation.\n - By specifying a value to use for the scaling operation.\n - By specifying an operation used at column level to calculate the value\n for the actual scaling operation.\n\n Eg. if one wants to scale the dataset by multiply by 100 you can directly\n pass multiply_by=100. Value can also be an array with as many values\n as column has the data being scaled. But if you want to multiply by the mean\n value of each column, then you can pass multiply_operation=jnp.mean (or any\n other operation desired).\n\n Operation parameters have the upper hand in the cases where both values and\n operations are passed, values will be ignored in this case.\n\n Scaler must be fit first in order to call the transform method.\n\n Attributes.\n divide_operation: Operation to apply over axis 0 of the fitting data to\n obtain the value that will be used for division during scaling.\n divide_by: Numbers(s) by which to divide data in the scaling process. Since\n the scaler is applied to axis 0 of the data, the shape of divide_by must\n be consistent with division into the data. For example, if data.shape =\n (100, 3, 5) then divide_by.shape can be (3, 5) or (5,) or a number. If\n divide_operation is given, this divide_by value will be ignored.\n multiply_operation: Operation to apply over axis 0 of the fitting data to\n obtain the value that will be used for multiplication during scaling.\n multiply_by: Numbers(s) by which to multiply data in the scaling process.\n Since the scaler is applied to axis 0 of the data, the shape of\n multiply_by must be consistent with multiplication into the data. For\n example, if data.shape = (100, 3, 5) then multiply_by.shape can be (3, 5)\n or (5,) or a number. If multiply_operation is given, this multiply_by\n value will be ignored.\n \"\"\"\n\n def __init__(\n self,\n divide_operation: Optional[Callable[[jnp.ndarray], jnp.float32]] = None,\n divide_by: Optional[Union[float, int, jnp.ndarray]] = 1,\n multiply_operation: Optional[Callable[[jnp.ndarray], jnp.float32]] = None,\n multiply_by: Optional[Union[float, int, jnp.ndarray]] = 1.) -> None:\n \"\"\"Constructor for the CustomScaler class.\"\"\"\n if all([\n divide_by is None, divide_operation is None, multiply_by is None,\n multiply_operation is None\n ]):\n raise ValueError(\"No values for transformations were provided and this \"\n \"scaler will fail. Please instantiate a valid one\")\n\n if divide_operation is None and divide_by is None:\n raise ValueError(\n \"Either a division operation or value needs to be passed. If \"\n \"you dont want to use a division to scale your data just \"\n \"pass divide_by=1.\")\n elif divide_operation is not None:\n self.divide_operation = divide_operation\n else:\n self.divide_by = divide_by\n\n if multiply_operation is None and multiply_by is None:\n raise ValueError(\n \"Either a multiplication operation or value needs to be passed. 
If \"\n \"you dont want to use a multiplication to scale your data just \"\n \"pass multiply_by=1.\")\n elif multiply_operation is not None:\n self.multiply_operation = multiply_operation\n else:\n self.multiply_by = multiply_by\n\n def fit(self, data: jnp.ndarray) -> None:\n \"\"\"Figures out values for transformations based on the specified operations.\n\n Args:\n data: Input dataset to use for fitting.\n \"\"\"\n if hasattr(self, \"divide_operation\"):\n self.divide_by = jnp.apply_along_axis(\n func1d=self.divide_operation, axis=0, arr=data)\n elif isinstance(self.divide_by, int) or isinstance(self.divide_by, float):\n self.divide_by = self.divide_by * jnp.ones(data.shape[1:])\n if hasattr(self, \"multiply_operation\"):\n self.multiply_by = jnp.apply_along_axis(\n func1d=self.multiply_operation, axis=0, arr=data)\n elif isinstance(self.multiply_by, int) or isinstance(\n self.multiply_by, float):\n self.multiply_by = self.multiply_by * jnp.ones(data.shape[1:])\n\n def transform(self, data: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Applies transformation based on fitted values.\n\n It can only be called if scaler was fit first.\n\n Args:\n data: Input dataset to transform.\n\n Returns:\n Transformed array.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/38", "ground_truth": " if not hasattr(self, \"divide_by\") or not hasattr(self, \"multiply_by\"):\n raise NotFittedScalerError(\n \"transform is called without fit being called previously. Please \"\n \"fit scaler first.\")\n return self.multiply_by * data / self.divide_by\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "preprocessing.py"], "context_start_lineno": 0, "lineno": 133, "function_name": "transform"}, "groundtruth": " if not hasattr(self, \"divide_by\") or not hasattr(self, \"multiply_by\"):\n raise NotFittedScalerError(\n \"transform is called without fit being called previously. Please \"\n \"fit scaler first.\")\n return self.multiply_by * data / self.divide_by\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for preprocessing dataset for training LightweightMMM.\"\"\"\n\nimport copy\nfrom typing import Callable, List, Optional, Sequence, Tuple, Union\n\nimport jax.numpy as jnp\nimport pandas as pd\nfrom sklearn import base\n\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\nfrom statsmodels.tools.tools import add_constant\nfrom lightweight_mmm.core import core_utils\n\n\nclass NotFittedScalerError(Exception):\n pass\n\n\nclass CustomScaler(base.TransformerMixin):\n \"\"\"Class to scale your data based on multiplications and divisions.\n\n This scaler can be used in two fashions for both the multiplication and\n division operation.\n - By specifying a value to use for the scaling operation.\n - By specifying an operation used at column level to calculate the value\n for the actual scaling operation.\n\n Eg. if one wants to scale the dataset by multiply by 100 you can directly\n pass multiply_by=100. 
Value can also be an array with as many values\n as column has the data being scaled. But if you want to multiply by the mean\n value of each column, then you can pass multiply_operation=jnp.mean (or any\n other operation desired).\n\n Operation parameters have the upper hand in the cases where both values and\n operations are passed, values will be ignored in this case.\n\n Scaler must be fit first in order to call the transform method.\n\n Attributes.\n divide_operation: Operation to apply over axis 0 of the fitting data to\n obtain the value that will be used for division during scaling.\n divide_by: Numbers(s) by which to divide data in the scaling process. Since\n the scaler is applied to axis 0 of the data, the shape of divide_by must\n be consistent with division into the data. For example, if data.shape =\n (100, 3, 5) then divide_by.shape can be (3, 5) or (5,) or a number. If\n divide_operation is given, this divide_by value will be ignored.\n multiply_operation: Operation to apply over axis 0 of the fitting data to\n obtain the value that will be used for multiplication during scaling.\n multiply_by: Numbers(s) by which to multiply data in the scaling process.\n Since the scaler is applied to axis 0 of the data, the shape of\n multiply_by must be consistent with multiplication into the data. For\n example, if data.shape = (100, 3, 5) then multiply_by.shape can be (3, 5)\n or (5,) or a number. If multiply_operation is given, this multiply_by\n value will be ignored.\n \"\"\"\n\n def __init__(\n self,\n divide_operation: Optional[Callable[[jnp.ndarray], jnp.float32]] = None,\n divide_by: Optional[Union[float, int, jnp.ndarray]] = 1,\n multiply_operation: Optional[Callable[[jnp.ndarray], jnp.float32]] = None,\n multiply_by: Optional[Union[float, int, jnp.ndarray]] = 1.) -> None:\n \"\"\"Constructor for the CustomScaler class.\"\"\"\n if all([\n divide_by is None, divide_operation is None, multiply_by is None,\n multiply_operation is None\n ]):\n raise ValueError(\"No values for transformations were provided and this \"\n \"scaler will fail. Please instantiate a valid one\")\n\n if divide_operation is None and divide_by is None:\n raise ValueError(\n \"Either a division operation or value needs to be passed. If \"\n \"you dont want to use a division to scale your data just \"\n \"pass divide_by=1.\")\n elif divide_operation is not None:\n self.divide_operation = divide_operation\n else:\n self.divide_by = divide_by\n\n if multiply_operation is None and multiply_by is None:\n raise ValueError(\n \"Either a multiplication operation or value needs to be passed. 
If \"\n \"you dont want to use a multiplication to scale your data just \"\n \"pass multiply_by=1.\")\n elif multiply_operation is not None:\n self.multiply_operation = multiply_operation\n else:\n self.multiply_by = multiply_by\n\n def fit(self, data: jnp.ndarray) -> None:\n \"\"\"Figures out values for transformations based on the specified operations.\n\n Args:\n data: Input dataset to use for fitting.\n \"\"\"\n if hasattr(self, \"divide_operation\"):\n self.divide_by = jnp.apply_along_axis(\n func1d=self.divide_operation, axis=0, arr=data)\n elif isinstance(self.divide_by, int) or isinstance(self.divide_by, float):\n self.divide_by = self.divide_by * jnp.ones(data.shape[1:])\n if hasattr(self, \"multiply_operation\"):\n self.multiply_by = jnp.apply_along_axis(\n func1d=self.multiply_operation, axis=0, arr=data)\n elif isinstance(self.multiply_by, int) or isinstance(\n self.multiply_by, float):\n self.multiply_by = self.multiply_by * jnp.ones(data.shape[1:])\n\n def transform(self, data: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Applies transformation based on fitted values.\n\n It can only be called if scaler was fit first.\n\n Args:\n data: Input dataset to transform.\n\n Returns:\n Transformed array.\n \"\"\"\n if not hasattr(self, \"divide_by\") or not hasattr(self, \"multiply_by\"):\n raise NotFittedScalerError(\n \"transform is called without fit being called previously. Please \"\n \"fit scaler first.\")\n return self.multiply_by * data / self.divide_by\n\n def fit_transform(self, data: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Fits the values and applies transformation to the input data.\n\n Args:\n data: Input dataset.\n\n Returns:\n Transformed array.\n \"\"\"\n self.fit(data)\n return self.transform(data)\n\n def inverse_transform(self, data: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Runs inverse transformation to get original values.\n\n Args:\n data: Input dataset.\n\n Returns:\n Dataset with the inverse transformation applied.\n \"\"\"\n return self.divide_by * data / self.multiply_by\n\n\ndef _compute_correlations(\n features: jnp.ndarray,\n target: jnp.ndarray,\n feature_names: List[str],\n ) -> List[pd.DataFrame]:\n \"\"\"Computes feature-feature and feature-target correlations.\n\n Helper function for DataQualityCheck.\n\n Args:\n features: Features for media mix model (media and non-media variables).\n target: Target variable for media mix model.\n feature_names: Names of media channels to be added to the output dataframes.\n\n Returns:\n List of dataframes containing Pearson correlation coefficients between each\n feature, as well as between features and the target variable. For\n national-level data the list contains just one dataframe, and for\n geo-level data the list contains one dataframe for each geo.\n\n Raises:\n ValueError: If features and target have incompatible shapes (e.g. 
one is\n geo-level and the other national-level).\n \"\"\"\n if not ((features.ndim == 2 and target.ndim == 1) or\n (features.ndim == 3 and target.ndim == 2)):\n raise ValueError(f\"Incompatible shapes between features {features.shape}\"\n f\" and target {target.shape}.\")\n\n number_of_geos = core_utils.get_number_geos(features)\n correlation_matrix_output = []\n for i_geo in range(number_of_geos):\n\n if number_of_geos == 1:\n features_and_target = jnp.concatenate(\n [features, jnp.expand_dims(target, axis=1)], axis=1)\n else:\n features_and_target = jnp.concatenate(\n [features[:, :, i_geo],\n jnp.expand_dims(target[:, i_geo], axis=1)],\n axis=1)\n\n covariance_matrix = jnp.cov(features_and_target, rowvar=False)\n standard_deviations = jnp.std(features_and_target, axis=0, ddof=1)\n correlation_matrix = covariance_matrix / jnp.outer(standard_deviations,\n standard_deviations)\n correlation_matrix = pd.DataFrame(\n correlation_matrix,\n columns=feature_names + [\"target\"],\n index=feature_names + [\"target\"],\n dtype=float)\n correlation_matrix_output.append(correlation_matrix)\n\n return correlation_matrix_output\n\n\ndef _compute_variances(\n features: jnp.ndarray,\n feature_names: Sequence[str],\n geo_names: Sequence[str],\n) -> pd.DataFrame:\n \"\"\"Computes variances over time for each feature.\n\n In general, higher variance is better since it creates more signal for the\n regression analysis. However, if the features have not been scaled (divided by\n the mean), then the variance can take any value and this analysis is not\n meaningful.\n\n Args:\n features: Features for media mix model (media and non-media variables).\n feature_names: Names of media channels to be added to the output dataframe.\n geo_names: Names of geos to be added to the output dataframes.\n\n Returns:\n Dataframe containing the variance over time for each feature. 
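The covariance-divided-by-outer-standard-deviations construction used above is the Pearson correlation written out by hand; an illustrative self-check against jnp.corrcoef:

import jax.numpy as jnp

x = jnp.array([[1., 2.], [2., 4.5], [3., 5.], [4., 9.]])
covariance = jnp.cov(x, rowvar=False)
stds = jnp.std(x, axis=0, ddof=1)
manual = covariance / jnp.outer(stds, stds)
assert jnp.allclose(manual, jnp.corrcoef(x, rowvar=False), atol=1e-6)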
This dataframe\n contains one row per geo, and just a single row for national data.\n\n Raises:\n ValueError: If the number of geos in features does not match the number of\n supplied geo_names.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/39", "ground_truth": " number_of_geos = core_utils.get_number_geos(features)\n\n if len(geo_names) != number_of_geos:\n raise ValueError(\"The number of geos in features does not match the length \"\n \"of geo_names\")\n\n variances_as_series = []\n for i_geo in range(number_of_geos):\n features_for_this_geo = features[...,\n i_geo] if number_of_geos > 1 else features\n variances_as_series.append(\n pd.DataFrame(data=features_for_this_geo).var(axis=0, ddof=0))\n\n variances = pd.concat(variances_as_series, axis=1)\n variances.columns = geo_names\n variances.index = copy.copy(feature_names)\n\n return variances\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "preprocessing.py"], "context_start_lineno": 0, "lineno": 244, "function_name": "_compute_variances"}, "groundtruth": " number_of_geos = core_utils.get_number_geos(features)\n\n if len(geo_names) != number_of_geos:\n raise ValueError(\"The number of geos in features does not match the length \"\n \"of geo_names\")\n\n variances_as_series = []\n for i_geo in range(number_of_geos):\n features_for_this_geo = features[...,\n i_geo] if number_of_geos > 1 else features\n variances_as_series.append(\n pd.DataFrame(data=features_for_this_geo).var(axis=0, ddof=0))\n\n variances = pd.concat(variances_as_series, axis=1)\n variances.columns = geo_names\n variances.index = copy.copy(feature_names)\n\n return variances\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for preprocessing dataset for training LightweightMMM.\"\"\"\n\nimport copy\nfrom typing import Callable, List, Optional, Sequence, Tuple, Union\n\nimport jax.numpy as jnp\nimport pandas as pd\nfrom sklearn import base\n\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\nfrom statsmodels.tools.tools import add_constant\nfrom lightweight_mmm.core import core_utils\n\n\nclass NotFittedScalerError(Exception):\n pass\n\n\nclass CustomScaler(base.TransformerMixin):\n \"\"\"Class to scale your data based on multiplications and divisions.\n\n This scaler can be used in two fashions for both the multiplication and\n division operation.\n - By specifying a value to use for the scaling operation.\n - By specifying an operation used at column level to calculate the value\n for the actual scaling operation.\n\n Eg. if one wants to scale the dataset by multiply by 100 you can directly\n pass multiply_by=100. Value can also be an array with as many values\n as column has the data being scaled. 
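For orientation, a sketch of how the _compute_variances ground truth above behaves on geo-level input. The feature and geo names are invented, and it assumes core_utils.get_number_geos reads the geo count from the trailing axis:

import jax.numpy as jnp
from lightweight_mmm import preprocessing

features = jnp.arange(52.0 * 3 * 2).reshape(52, 3, 2)  # weeks x features x geos
variances = preprocessing._compute_variances(
    features=features,
    feature_names=["tv", "search", "price"],  # invented names
    geo_names=["geo_0", "geo_1"])
# -> 3x2 DataFrame of population (ddof=0) variances: rows=features, cols=geos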
But if you want to multiply by the mean\n value of each column, then you can pass multiply_operation=jnp.mean (or any\n other operation desired).\n\n Operation parameters have the upper hand in the cases where both values and\n operations are passed, values will be ignored in this case.\n\n Scaler must be fit first in order to call the transform method.\n\n Attributes.\n divide_operation: Operation to apply over axis 0 of the fitting data to\n obtain the value that will be used for division during scaling.\n divide_by: Numbers(s) by which to divide data in the scaling process. Since\n the scaler is applied to axis 0 of the data, the shape of divide_by must\n be consistent with division into the data. For example, if data.shape =\n (100, 3, 5) then divide_by.shape can be (3, 5) or (5,) or a number. If\n divide_operation is given, this divide_by value will be ignored.\n multiply_operation: Operation to apply over axis 0 of the fitting data to\n obtain the value that will be used for multiplication during scaling.\n multiply_by: Numbers(s) by which to multiply data in the scaling process.\n Since the scaler is applied to axis 0 of the data, the shape of\n multiply_by must be consistent with multiplication into the data. For\n example, if data.shape = (100, 3, 5) then multiply_by.shape can be (3, 5)\n or (5,) or a number. If multiply_operation is given, this multiply_by\n value will be ignored.\n \"\"\"\n\n def __init__(\n self,\n divide_operation: Optional[Callable[[jnp.ndarray], jnp.float32]] = None,\n divide_by: Optional[Union[float, int, jnp.ndarray]] = 1,\n multiply_operation: Optional[Callable[[jnp.ndarray], jnp.float32]] = None,\n multiply_by: Optional[Union[float, int, jnp.ndarray]] = 1.) -> None:\n \"\"\"Constructor for the CustomScaler class.\"\"\"\n if all([\n divide_by is None, divide_operation is None, multiply_by is None,\n multiply_operation is None\n ]):\n raise ValueError(\"No values for transformations were provided and this \"\n \"scaler will fail. Please instantiate a valid one\")\n\n if divide_operation is None and divide_by is None:\n raise ValueError(\n \"Either a division operation or value needs to be passed. If \"\n \"you dont want to use a division to scale your data just \"\n \"pass divide_by=1.\")\n elif divide_operation is not None:\n self.divide_operation = divide_operation\n else:\n self.divide_by = divide_by\n\n if multiply_operation is None and multiply_by is None:\n raise ValueError(\n \"Either a multiplication operation or value needs to be passed. 
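The "fit first" contract in the docstring above is enforced via NotFittedScalerError; a hypothetical snippet making it concrete:

import jax.numpy as jnp
from lightweight_mmm import preprocessing

scaler = preprocessing.CustomScaler(divide_operation=jnp.mean)
try:
  scaler.transform(jnp.ones((10, 3)))
except preprocessing.NotFittedScalerError:
  pass  # divide_by only exists after fit() when an operation was given
scaler.fit(jnp.ones((10, 3)))
scaler.transform(jnp.ones((10, 3)))  # succeeds once fitted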
If \"\n \"you dont want to use a multiplication to scale your data just \"\n \"pass multiply_by=1.\")\n elif multiply_operation is not None:\n self.multiply_operation = multiply_operation\n else:\n self.multiply_by = multiply_by\n\n def fit(self, data: jnp.ndarray) -> None:\n \"\"\"Figures out values for transformations based on the specified operations.\n\n Args:\n data: Input dataset to use for fitting.\n \"\"\"\n if hasattr(self, \"divide_operation\"):\n self.divide_by = jnp.apply_along_axis(\n func1d=self.divide_operation, axis=0, arr=data)\n elif isinstance(self.divide_by, int) or isinstance(self.divide_by, float):\n self.divide_by = self.divide_by * jnp.ones(data.shape[1:])\n if hasattr(self, \"multiply_operation\"):\n self.multiply_by = jnp.apply_along_axis(\n func1d=self.multiply_operation, axis=0, arr=data)\n elif isinstance(self.multiply_by, int) or isinstance(\n self.multiply_by, float):\n self.multiply_by = self.multiply_by * jnp.ones(data.shape[1:])\n\n def transform(self, data: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Applies transformation based on fitted values.\n\n It can only be called if scaler was fit first.\n\n Args:\n data: Input dataset to transform.\n\n Returns:\n Transformed array.\n \"\"\"\n if not hasattr(self, \"divide_by\") or not hasattr(self, \"multiply_by\"):\n raise NotFittedScalerError(\n \"transform is called without fit being called previously. Please \"\n \"fit scaler first.\")\n return self.multiply_by * data / self.divide_by\n\n def fit_transform(self, data: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Fits the values and applies transformation to the input data.\n\n Args:\n data: Input dataset.\n\n Returns:\n Transformed array.\n \"\"\"\n self.fit(data)\n return self.transform(data)\n\n def inverse_transform(self, data: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Runs inverse transformation to get original values.\n\n Args:\n data: Input dataset.\n\n Returns:\n Dataset with the inverse transformation applied.\n \"\"\"\n return self.divide_by * data / self.multiply_by\n\n\ndef _compute_correlations(\n features: jnp.ndarray,\n target: jnp.ndarray,\n feature_names: List[str],\n ) -> List[pd.DataFrame]:\n \"\"\"Computes feature-feature and feature-target correlations.\n\n Helper function for DataQualityCheck.\n\n Args:\n features: Features for media mix model (media and non-media variables).\n target: Target variable for media mix model.\n feature_names: Names of media channels to be added to the output dataframes.\n\n Returns:\n List of dataframes containing Pearson correlation coefficients between each\n feature, as well as between features and the target variable. For\n national-level data the list contains just one dataframe, and for\n geo-level data the list contains one dataframe for each geo.\n\n Raises:\n ValueError: If features and target have incompatible shapes (e.g. 
one is\n geo-level and the other national-level).\n \"\"\"\n if not ((features.ndim == 2 and target.ndim == 1) or\n (features.ndim == 3 and target.ndim == 2)):\n raise ValueError(f\"Incompatible shapes between features {features.shape}\"\n f\" and target {target.shape}.\")\n\n number_of_geos = core_utils.get_number_geos(features)\n correlation_matrix_output = []\n for i_geo in range(number_of_geos):\n\n if number_of_geos == 1:\n features_and_target = jnp.concatenate(\n [features, jnp.expand_dims(target, axis=1)], axis=1)\n else:\n features_and_target = jnp.concatenate(\n [features[:, :, i_geo],\n jnp.expand_dims(target[:, i_geo], axis=1)],\n axis=1)\n\n covariance_matrix = jnp.cov(features_and_target, rowvar=False)\n standard_deviations = jnp.std(features_and_target, axis=0, ddof=1)\n correlation_matrix = covariance_matrix / jnp.outer(standard_deviations,\n standard_deviations)\n correlation_matrix = pd.DataFrame(\n correlation_matrix,\n columns=feature_names + [\"target\"],\n index=feature_names + [\"target\"],\n dtype=float)\n correlation_matrix_output.append(correlation_matrix)\n\n return correlation_matrix_output\n\n\ndef _compute_variances(\n features: jnp.ndarray,\n feature_names: Sequence[str],\n geo_names: Sequence[str],\n) -> pd.DataFrame:\n \"\"\"Computes variances over time for each feature.\n\n In general, higher variance is better since it creates more signal for the\n regression analysis. However, if the features have not been scaled (divided by\n the mean), then the variance can take any value and this analysis is not\n meaningful.\n\n Args:\n features: Features for media mix model (media and non-media variables).\n feature_names: Names of media channels to be added to the output dataframe.\n geo_names: Names of geos to be added to the output dataframes.\n\n Returns:\n Dataframe containing the variance over time for each feature. 
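Since transform computes multiply_by * data / divide_by and inverse_transform applies the exact reverse, a fitted scaler round-trips its input; a short illustrative check:

import jax.numpy as jnp
from lightweight_mmm import preprocessing

data = jnp.arange(1.0, 13.0).reshape(4, 3)
scaler = preprocessing.CustomScaler(divide_operation=jnp.mean)
restored = scaler.inverse_transform(scaler.fit_transform(data))
assert jnp.allclose(restored, data, atol=1e-5)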
This dataframe\n contains one row per geo, and just a single row for national data.\n\n Raises:\n ValueError: If the number of geos in features does not match the number of\n supplied geo_names.\n \"\"\"\n number_of_geos = core_utils.get_number_geos(features)\n\n if len(geo_names) != number_of_geos:\n raise ValueError(\"The number of geos in features does not match the length \"\n \"of geo_names\")\n\n variances_as_series = []\n for i_geo in range(number_of_geos):\n features_for_this_geo = features[...,\n i_geo] if number_of_geos > 1 else features\n variances_as_series.append(\n pd.DataFrame(data=features_for_this_geo).var(axis=0, ddof=0))\n\n variances = pd.concat(variances_as_series, axis=1)\n variances.columns = geo_names\n variances.index = copy.copy(feature_names)\n\n return variances\n\n\ndef _compute_spend_fractions(\n cost_data: jnp.ndarray,\n channel_names: Optional[Sequence[str]] = None,\n output_column_name: str = \"fraction of spend\") -> pd.DataFrame:\n \"\"\"Computes fraction of total spend for each media channel.\n\n Args:\n cost_data: Spend (can be normalized or not) per channel.\n channel_names: Names of media channels to be added to the output dataframe.\n output_column_name: Name of the column in the output dataframe, denoting the\n fraction of the total spend in each media channel.\n\n Returns:\n Dataframe containing fraction of the total spend in each channel.\n\n Raises:\n ValueError if any of the costs are zero or negative.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/40", "ground_truth": " cost_df = pd.DataFrame(\n cost_data, index=channel_names, columns=[output_column_name])\n\n if (cost_df[output_column_name] <= 0).any():\n raise ValueError(\"Values in cost_data must all be positive.\")\n\n normalized_cost_df = cost_df.div(cost_df.sum(axis=0), axis=1).round(4)\n return normalized_cost_df\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "preprocessing.py"], "context_start_lineno": 0, "lineno": 282, "function_name": "_compute_spend_fractions"}, "groundtruth": " cost_df = pd.DataFrame(\n cost_data, index=channel_names, columns=[output_column_name])\n\n if (cost_df[output_column_name] <= 0).any():\n raise ValueError(\"Values in cost_data must all be positive.\")\n\n normalized_cost_df = cost_df.div(cost_df.sum(axis=0), axis=1).round(4)\n return normalized_cost_df\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilities for preprocessing dataset for training LightweightMMM.\"\"\"\n\nimport copy\nfrom typing import Callable, List, Optional, Sequence, Tuple, Union\n\nimport jax.numpy as jnp\nimport pandas as pd\nfrom sklearn import base\n\nfrom statsmodels.stats.outliers_influence import variance_inflation_factor\nfrom statsmodels.tools.tools import add_constant\nfrom lightweight_mmm.core import core_utils\n\n\nclass NotFittedScalerError(Exception):\n pass\n\n\nclass CustomScaler(base.TransformerMixin):\n \"\"\"Class to scale your data based on multiplications and divisions.\n\n This 
scaler can be used in two fashions for both the multiplication and\n division operation.\n - By specifying a value to use for the scaling operation.\n - By specifying an operation used at column level to calculate the value\n for the actual scaling operation.\n\n Eg. if one wants to scale the dataset by multiply by 100 you can directly\n pass multiply_by=100. Value can also be an array with as many values\n as column has the data being scaled. But if you want to multiply by the mean\n value of each column, then you can pass multiply_operation=jnp.mean (or any\n other operation desired).\n\n Operation parameters have the upper hand in the cases where both values and\n operations are passed, values will be ignored in this case.\n\n Scaler must be fit first in order to call the transform method.\n\n Attributes.\n divide_operation: Operation to apply over axis 0 of the fitting data to\n obtain the value that will be used for division during scaling.\n divide_by: Numbers(s) by which to divide data in the scaling process. Since\n the scaler is applied to axis 0 of the data, the shape of divide_by must\n be consistent with division into the data. For example, if data.shape =\n (100, 3, 5) then divide_by.shape can be (3, 5) or (5,) or a number. If\n divide_operation is given, this divide_by value will be ignored.\n multiply_operation: Operation to apply over axis 0 of the fitting data to\n obtain the value that will be used for multiplication during scaling.\n multiply_by: Numbers(s) by which to multiply data in the scaling process.\n Since the scaler is applied to axis 0 of the data, the shape of\n multiply_by must be consistent with multiplication into the data. For\n example, if data.shape = (100, 3, 5) then multiply_by.shape can be (3, 5)\n or (5,) or a number. If multiply_operation is given, this multiply_by\n value will be ignored.\n \"\"\"\n\n def __init__(\n self,\n divide_operation: Optional[Callable[[jnp.ndarray], jnp.float32]] = None,\n divide_by: Optional[Union[float, int, jnp.ndarray]] = 1,\n multiply_operation: Optional[Callable[[jnp.ndarray], jnp.float32]] = None,\n multiply_by: Optional[Union[float, int, jnp.ndarray]] = 1.) -> None:\n \"\"\"Constructor for the CustomScaler class.\"\"\"\n if all([\n divide_by is None, divide_operation is None, multiply_by is None,\n multiply_operation is None\n ]):\n raise ValueError(\"No values for transformations were provided and this \"\n \"scaler will fail. Please instantiate a valid one\")\n\n if divide_operation is None and divide_by is None:\n raise ValueError(\n \"Either a division operation or value needs to be passed. If \"\n \"you dont want to use a division to scale your data just \"\n \"pass divide_by=1.\")\n elif divide_operation is not None:\n self.divide_operation = divide_operation\n else:\n self.divide_by = divide_by\n\n if multiply_operation is None and multiply_by is None:\n raise ValueError(\n \"Either a multiplication operation or value needs to be passed. 
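The constructor guard above (all four arguments None) can be exercised directly; a hypothetical snippet:

from lightweight_mmm import preprocessing

try:
  preprocessing.CustomScaler(divide_by=None, divide_operation=None,
                             multiply_by=None, multiply_operation=None)
except ValueError:
  pass  # no transformation specified, so the scaler refuses to instantiate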
If \"\n \"you dont want to use a multiplication to scale your data just \"\n \"pass multiply_by=1.\")\n elif multiply_operation is not None:\n self.multiply_operation = multiply_operation\n else:\n self.multiply_by = multiply_by\n\n def fit(self, data: jnp.ndarray) -> None:\n \"\"\"Figures out values for transformations based on the specified operations.\n\n Args:\n data: Input dataset to use for fitting.\n \"\"\"\n if hasattr(self, \"divide_operation\"):\n self.divide_by = jnp.apply_along_axis(\n func1d=self.divide_operation, axis=0, arr=data)\n elif isinstance(self.divide_by, int) or isinstance(self.divide_by, float):\n self.divide_by = self.divide_by * jnp.ones(data.shape[1:])\n if hasattr(self, \"multiply_operation\"):\n self.multiply_by = jnp.apply_along_axis(\n func1d=self.multiply_operation, axis=0, arr=data)\n elif isinstance(self.multiply_by, int) or isinstance(\n self.multiply_by, float):\n self.multiply_by = self.multiply_by * jnp.ones(data.shape[1:])\n\n def transform(self, data: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Applies transformation based on fitted values.\n\n It can only be called if scaler was fit first.\n\n Args:\n data: Input dataset to transform.\n\n Returns:\n Transformed array.\n \"\"\"\n if not hasattr(self, \"divide_by\") or not hasattr(self, \"multiply_by\"):\n raise NotFittedScalerError(\n \"transform is called without fit being called previously. Please \"\n \"fit scaler first.\")\n return self.multiply_by * data / self.divide_by\n\n def fit_transform(self, data: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Fits the values and applies transformation to the input data.\n\n Args:\n data: Input dataset.\n\n Returns:\n Transformed array.\n \"\"\"\n self.fit(data)\n return self.transform(data)\n\n def inverse_transform(self, data: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Runs inverse transformation to get original values.\n\n Args:\n data: Input dataset.\n\n Returns:\n Dataset with the inverse transformation applied.\n \"\"\"\n return self.divide_by * data / self.multiply_by\n\n\ndef _compute_correlations(\n features: jnp.ndarray,\n target: jnp.ndarray,\n feature_names: List[str],\n ) -> List[pd.DataFrame]:\n \"\"\"Computes feature-feature and feature-target correlations.\n\n Helper function for DataQualityCheck.\n\n Args:\n features: Features for media mix model (media and non-media variables).\n target: Target variable for media mix model.\n feature_names: Names of media channels to be added to the output dataframes.\n\n Returns:\n List of dataframes containing Pearson correlation coefficients between each\n feature, as well as between features and the target variable. For\n national-level data the list contains just one dataframe, and for\n geo-level data the list contains one dataframe for each geo.\n\n Raises:\n ValueError: If features and target have incompatible shapes (e.g. 
one is\n geo-level and the other national-level).\n \"\"\"\n if not ((features.ndim == 2 and target.ndim == 1) or\n (features.ndim == 3 and target.ndim == 2)):\n raise ValueError(f\"Incompatible shapes between features {features.shape}\"\n f\" and target {target.shape}.\")\n\n number_of_geos = core_utils.get_number_geos(features)\n correlation_matrix_output = []\n for i_geo in range(number_of_geos):\n\n if number_of_geos == 1:\n features_and_target = jnp.concatenate(\n [features, jnp.expand_dims(target, axis=1)], axis=1)\n else:\n features_and_target = jnp.concatenate(\n [features[:, :, i_geo],\n jnp.expand_dims(target[:, i_geo], axis=1)],\n axis=1)\n\n covariance_matrix = jnp.cov(features_and_target, rowvar=False)\n standard_deviations = jnp.std(features_and_target, axis=0, ddof=1)\n correlation_matrix = covariance_matrix / jnp.outer(standard_deviations,\n standard_deviations)\n correlation_matrix = pd.DataFrame(\n correlation_matrix,\n columns=feature_names + [\"target\"],\n index=feature_names + [\"target\"],\n dtype=float)\n correlation_matrix_output.append(correlation_matrix)\n\n return correlation_matrix_output\n\n\ndef _compute_variances(\n features: jnp.ndarray,\n feature_names: Sequence[str],\n geo_names: Sequence[str],\n) -> pd.DataFrame:\n \"\"\"Computes variances over time for each feature.\n\n In general, higher variance is better since it creates more signal for the\n regression analysis. However, if the features have not been scaled (divided by\n the mean), then the variance can take any value and this analysis is not\n meaningful.\n\n Args:\n features: Features for media mix model (media and non-media variables).\n feature_names: Names of media channels to be added to the output dataframe.\n geo_names: Names of geos to be added to the output dataframes.\n\n Returns:\n Dataframe containing the variance over time for each feature. 
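A shape-contract sketch for _compute_correlations as implemented above: one correlation frame for national data and one per geo otherwise. It assumes core_utils.get_number_geos counts geos on the trailing axis (that helper is not shown in this file):

import jax.numpy as jnp
from lightweight_mmm import preprocessing

t = jnp.arange(52.0)
feats = jnp.stack([t, t ** 2, jnp.sin(t)], axis=1)    # national: (52, 3)
assert len(preprocessing._compute_correlations(
    feats, t, ["tv", "search", "price"])) == 1

feats_geo = jnp.stack([feats, feats + 1.0], axis=-1)  # geo-level: (52, 3, 2)
target_geo = jnp.stack([t, t + 1.0], axis=-1)         # (52, 2)
assert len(preprocessing._compute_correlations(
    feats_geo, target_geo, ["tv", "search", "price"])) == 2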
This dataframe\n contains one row per geo, and just a single row for national data.\n\n Raises:\n ValueError: If the number of geos in features does not match the number of\n supplied geo_names.\n \"\"\"\n number_of_geos = core_utils.get_number_geos(features)\n\n if len(geo_names) != number_of_geos:\n raise ValueError(\"The number of geos in features does not match the length \"\n \"of geo_names\")\n\n variances_as_series = []\n for i_geo in range(number_of_geos):\n features_for_this_geo = features[...,\n i_geo] if number_of_geos > 1 else features\n variances_as_series.append(\n pd.DataFrame(data=features_for_this_geo).var(axis=0, ddof=0))\n\n variances = pd.concat(variances_as_series, axis=1)\n variances.columns = geo_names\n variances.index = copy.copy(feature_names)\n\n return variances\n\n\ndef _compute_spend_fractions(\n cost_data: jnp.ndarray,\n channel_names: Optional[Sequence[str]] = None,\n output_column_name: str = \"fraction of spend\") -> pd.DataFrame:\n \"\"\"Computes fraction of total spend for each media channel.\n\n Args:\n cost_data: Spend (can be normalized or not) per channel.\n channel_names: Names of media channels to be added to the output dataframe.\n output_column_name: Name of the column in the output dataframe, denoting the\n fraction of the total spend in each media channel.\n\n Returns:\n Dataframe containing fraction of the total spend in each channel.\n\n Raises:\n ValueError if any of the costs are zero or negative.\n \"\"\"\n cost_df = pd.DataFrame(\n cost_data, index=channel_names, columns=[output_column_name])\n\n if (cost_df[output_column_name] <= 0).any():\n raise ValueError(\"Values in cost_data must all be positive.\")\n\n normalized_cost_df = cost_df.div(cost_df.sum(axis=0), axis=1).round(4)\n return normalized_cost_df\n\n\ndef _compute_variance_inflation_factors(\n features: jnp.ndarray, feature_names: Sequence[str],\n geo_names: Sequence[str]) -> pd.DataFrame:\n \"\"\"Computes variance inflation factors for all features.\n\n Helper function for DataQualityCheck.\n\n Args:\n features: Features for media mix model (media and non-media variables).\n feature_names: Names of media channels to be added to the output dataframe.\n geo_names: Names of geos to be added to the output dataframes.\n\n Returns:\n Dataframe containing variance inflation factors for each feature. 
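The VIF computation described above follows the usual statsmodels recipe; a standalone equivalent on invented data:

import numpy as np
import pandas as pd
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant

rng = np.random.default_rng(0)
features = pd.DataFrame(rng.normal(size=(52, 3)),
                        columns=["tv", "search", "price"])  # invented names
with_const = add_constant(features, has_constant="skip")
vifs = [variance_inflation_factor(with_const.values, i)
        for i, col in enumerate(with_const.columns) if col != "const"]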
For\n national-level data the dataframe contains just one column, and for\n geo-level data the list contains one column for each geo.\n\n Raises:\n ValueError: If the number of geos in features does not match the number of\n supplied geo_names.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/41", "ground_truth": " number_of_geos = core_utils.get_number_geos(features)\n\n if len(geo_names) != number_of_geos:\n raise ValueError(\"The number of geos in features does not match the length \"\n \"of geo_names\")\n\n vifs_for_each_geo = []\n for i_geo in range(number_of_geos):\n features_for_this_geo = features[...,\n i_geo] if number_of_geos > 1 else features\n features_for_this_geo = add_constant(\n pd.DataFrame(features_for_this_geo, dtype=float), has_constant=\"skip\")\n\n vifs_for_this_geo = []\n for i, feature in enumerate(features_for_this_geo.columns):\n if feature != \"const\":\n vifs_for_this_geo.append(\n variance_inflation_factor(features_for_this_geo.values, i))\n\n vifs_for_each_geo.append(vifs_for_this_geo)\n\n vif_df = pd.DataFrame(data=zip(*vifs_for_each_geo), dtype=float)\n vif_df.columns = geo_names\n vif_df.index = copy.copy(feature_names)\n\n return vif_df\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "preprocessing.py"], "context_start_lineno": 0, "lineno": 313, "function_name": "_compute_variance_inflation_factors"}, "groundtruth": " number_of_geos = core_utils.get_number_geos(features)\n\n if len(geo_names) != number_of_geos:\n raise ValueError(\"The number of geos in features does not match the length \"\n \"of geo_names\")\n\n vifs_for_each_geo = []\n for i_geo in range(number_of_geos):\n features_for_this_geo = features[...,\n i_geo] if number_of_geos > 1 else features\n features_for_this_geo = add_constant(\n pd.DataFrame(features_for_this_geo, dtype=float), has_constant=\"skip\")\n\n vifs_for_this_geo = []\n for i, feature in enumerate(features_for_this_geo.columns):\n if feature != \"const\":\n vifs_for_this_geo.append(\n variance_inflation_factor(features_for_this_geo.values, i))\n\n vifs_for_each_geo.append(vifs_for_this_geo)\n\n vif_df = pd.DataFrame(data=zip(*vifs_for_each_geo), dtype=float)\n vif_df.columns = geo_names\n vif_df.index = copy.copy(feature_names)\n\n return vif_df\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Set of utilities for LightweighMMM package.\"\"\"\nimport pickle\nimport time\nfrom typing import Any, List, Optional, Tuple\n\nfrom absl import logging\nfrom jax import random\nimport jax.numpy as jnp\nimport numpy as np\nimport pandas as pd\nfrom scipy import interpolate\nfrom scipy import optimize\nfrom scipy import spatial\nfrom scipy import stats\nfrom tensorflow.io import gfile\n\nfrom lightweight_mmm import media_transforms\n\n\ndef save_model(\n media_mix_model: Any,\n file_path: str\n ) -> None:\n \"\"\"Saves the given model in the given path.\n\n Args:\n media_mix_model: Model to save on disk.\n file_path: File path where the model should be 
placed.\n \"\"\"\n with gfile.GFile(file_path, \"wb\") as file:\n pickle.dump(obj=media_mix_model, file=file)\n\n\ndef load_model(file_path: str) -> Any:\n \"\"\"Loads a model given a string path.\n\n Args:\n file_path: Path of the file containing the model.\n\n Returns:\n The LightweightMMM object that was stored in the given path.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/42", "ground_truth": " with gfile.GFile(file_path, \"rb\") as file:\n media_mix_model = pickle.load(file=file)\n\n for attr in dir(media_mix_model):\n if attr.startswith(\"__\"):\n continue\n attr_value = getattr(media_mix_model, attr)\n if isinstance(attr_value, np.ndarray):\n setattr(media_mix_model, attr, jnp.array(attr_value))\n\n return media_mix_model\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "utils.py"], "context_start_lineno": 0, "lineno": 56, "function_name": "load_model"}, "groundtruth": " with gfile.GFile(file_path, \"rb\") as file:\n media_mix_model = pickle.load(file=file)\n\n for attr in dir(media_mix_model):\n if attr.startswith(\"__\"):\n continue\n attr_value = getattr(media_mix_model, attr)\n if isinstance(attr_value, np.ndarray):\n setattr(media_mix_model, attr, jnp.array(attr_value))\n\n return media_mix_model\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Set of utilities for LightweighMMM package.\"\"\"\nimport pickle\nimport time\nfrom typing import Any, List, Optional, Tuple\n\nfrom absl import logging\nfrom jax import random\nimport jax.numpy as jnp\nimport numpy as np\nimport pandas as pd\nfrom scipy import interpolate\nfrom scipy import optimize\nfrom scipy import spatial\nfrom scipy import stats\nfrom tensorflow.io import gfile\n\nfrom lightweight_mmm import media_transforms\n\n\ndef save_model(\n media_mix_model: Any,\n file_path: str\n ) -> None:\n \"\"\"Saves the given model in the given path.\n\n Args:\n media_mix_model: Model to save on disk.\n file_path: File path where the model should be placed.\n \"\"\"\n with gfile.GFile(file_path, \"wb\") as file:\n pickle.dump(obj=media_mix_model, file=file)\n\n\ndef load_model(file_path: str) -> Any:\n \"\"\"Loads a model given a string path.\n\n Args:\n file_path: Path of the file containing the model.\n\n Returns:\n The LightweightMMM object that was stored in the given path.\n \"\"\"\n with gfile.GFile(file_path, \"rb\") as file:\n media_mix_model = pickle.load(file=file)\n\n for attr in dir(media_mix_model):\n if attr.startswith(\"__\"):\n continue\n attr_value = getattr(media_mix_model, attr)\n if isinstance(attr_value, np.ndarray):\n setattr(media_mix_model, attr, jnp.array(attr_value))\n\n return media_mix_model\n\n\ndef get_time_seed() -> int:\n \"\"\"Generates an integer using the last decimals of time.time().\n\n Returns:\n Integer to be used as seed.\n \"\"\"\n # time.time() has the following format: 1645174953.0429401\n return int(str(time.time()).split(\".\")[1])\n\n\ndef simulate_dummy_data(\n data_size: int,\n n_media_channels: int,\n 
n_extra_features: int,\n geos: int = 1,\n seed: int = 5\n ) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:\n \"\"\"Simulates dummy data needed for media mix modelling.\n\n This function's goal is to be super simple and not have many parameters,\n although it does not generate a fully realistic dataset is only meant to be\n used for demos/tutorial purposes. Uses carryover for lagging but has no\n saturation and no trend.\n\n The data simulated includes the media data, extra features, a target/KPI and\n costs.\n\n Args:\n data_size: Number of rows to generate.\n n_media_channels: Number of media channels to generate.\n n_extra_features: Number of extra features to generate.\n geos: Number of geos for geo level data (default = 1 for national).\n seed: Random seed.\n\n Returns:\n The simulated media, extra features, target and costs.\n \"\"\"\n if data_size < 1 or n_media_channels < 1 or n_extra_features < 1:\n raise ValueError(\n \"Data size, n_media_channels and n_extra_features must be greater than\"\n \" 0. Please check the values introduced are greater than zero.\")\n data_offset = int(data_size * 0.2)\n data_size += data_offset\n key = random.PRNGKey(seed)\n sub_keys = random.split(key=key, num=7)\n media_data = random.normal(key=sub_keys[0],\n shape=(data_size, n_media_channels)) * 1.5 + 20\n\n extra_features = random.normal(key=sub_keys[1],\n shape=(data_size, n_extra_features)) + 5\n # Reduce the costs to make ROI realistic.\n costs = media_data[data_offset:].sum(axis=0) * .1\n\n seasonality = media_transforms.calculate_seasonality(\n number_periods=data_size,\n degrees=2,\n frequency=52,\n gamma_seasonality=1)\n target_noise = random.normal(key=sub_keys[2], shape=(data_size,)) + 3\n\n # media_data_transformed = media_transforms.adstock(media_data)\n media_data_transformed = media_transforms.carryover(\n data=media_data,\n ad_effect_retention_rate=jnp.full((n_media_channels,), fill_value=.5),\n peak_effect_delay=jnp.full((n_media_channels,), fill_value=1.))\n beta_media = random.normal(key=sub_keys[3], shape=(n_media_channels,)) + 1\n beta_extra_features = random.normal(key=sub_keys[4],\n shape=(n_extra_features,))\n # There is no trend to keep this very simple.\n target = 15 + seasonality + media_data_transformed.dot(\n beta_media) + extra_features.dot(beta_extra_features) + target_noise\n\n logging.info(\"Correlation between transformed media and target\")\n logging.info([\n np.corrcoef(target[data_offset:], media_data_transformed[data_offset:,\n i])[0, 1]\n for i in range(n_media_channels)\n ])\n\n logging.info(\"True ROI for media channels\")\n logging.info([\n sum(media_data_transformed[data_offset:, i] * beta_media[i]) / costs[i]\n for i in range(n_media_channels)\n ])\n\n if geos > 1:\n # Distribute national data to geo and add some more noise.\n weights = random.uniform(key=sub_keys[5], shape=(1, geos))\n weights /= sum(weights)\n target_noise = random.normal(key=sub_keys[6], shape=(data_size, geos)) * .5\n target = target[:, np.newaxis].dot(weights) + target_noise\n media_data = media_data[:, :, np.newaxis].dot(weights)\n extra_features = extra_features[:, :, np.newaxis].dot(weights)\n\n return (media_data[data_offset:], extra_features[data_offset:],\n target[data_offset:], costs)\n\n\ndef _split_array_into_list(\n dataframe: pd.DataFrame,\n split_level_feature: str,\n features: List[str],\n national_model_flag: bool = True) -> List[np.ndarray]:\n \"\"\"Splits data frame into list of jax arrays.\n\n Args:\n dataframe: Dataframe with all the modeling feature.\n 
split_level_feature: Feature that will be used to split.\n features: List of feature to export from data frame.\n national_model_flag: Whether the data frame is used for national model.\n\n Returns:\n List of jax arrays.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/43", "ground_truth": " split_level = dataframe[split_level_feature].unique()\n array_list_by_level = [\n dataframe.loc[dataframe[split_level_feature] == level, features].values.T\n for level in split_level\n ]\n feature_array = jnp.stack(array_list_by_level)\n if national_model_flag:\n feature_array = jnp.squeeze(feature_array, axis=2)\n return feature_array\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "utils.py"], "context_start_lineno": 0, "lineno": 183, "function_name": "_split_array_into_list"}, "groundtruth": " split_level = dataframe[split_level_feature].unique()\n array_list_by_level = [\n dataframe.loc[dataframe[split_level_feature] == level, features].values.T\n for level in split_level\n ]\n feature_array = jnp.stack(array_list_by_level)\n if national_model_flag:\n feature_array = jnp.squeeze(feature_array, axis=2)\n return feature_array\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Set of utilities for LightweighMMM package.\"\"\"\nimport pickle\nimport time\nfrom typing import Any, List, Optional, Tuple\n\nfrom absl import logging\nfrom jax import random\nimport jax.numpy as jnp\nimport numpy as np\nimport pandas as pd\nfrom scipy import interpolate\nfrom scipy import optimize\nfrom scipy import spatial\nfrom scipy import stats\nfrom tensorflow.io import gfile\n\nfrom lightweight_mmm import media_transforms\n\n\ndef save_model(\n media_mix_model: Any,\n file_path: str\n ) -> None:\n \"\"\"Saves the given model in the given path.\n\n Args:\n media_mix_model: Model to save on disk.\n file_path: File path where the model should be placed.\n \"\"\"\n with gfile.GFile(file_path, \"wb\") as file:\n pickle.dump(obj=media_mix_model, file=file)\n\n\ndef load_model(file_path: str) -> Any:\n \"\"\"Loads a model given a string path.\n\n Args:\n file_path: Path of the file containing the model.\n\n Returns:\n The LightweightMMM object that was stored in the given path.\n \"\"\"\n with gfile.GFile(file_path, \"rb\") as file:\n media_mix_model = pickle.load(file=file)\n\n for attr in dir(media_mix_model):\n if attr.startswith(\"__\"):\n continue\n attr_value = getattr(media_mix_model, attr)\n if isinstance(attr_value, np.ndarray):\n setattr(media_mix_model, attr, jnp.array(attr_value))\n\n return media_mix_model\n\n\ndef get_time_seed() -> int:\n \"\"\"Generates an integer using the last decimals of time.time().\n\n Returns:\n Integer to be used as seed.\n \"\"\"\n # time.time() has the following format: 1645174953.0429401\n return int(str(time.time()).split(\".\")[1])\n\n\ndef simulate_dummy_data(\n data_size: int,\n n_media_channels: int,\n n_extra_features: int,\n geos: int = 1,\n seed: int = 5\n ) -> Tuple[jnp.ndarray, jnp.ndarray, 
jnp.ndarray, jnp.ndarray]:\n \"\"\"Simulates dummy data needed for media mix modelling.\n\n This function's goal is to be super simple and not have many parameters,\n although it does not generate a fully realistic dataset is only meant to be\n used for demos/tutorial purposes. Uses carryover for lagging but has no\n saturation and no trend.\n\n The data simulated includes the media data, extra features, a target/KPI and\n costs.\n\n Args:\n data_size: Number of rows to generate.\n n_media_channels: Number of media channels to generate.\n n_extra_features: Number of extra features to generate.\n geos: Number of geos for geo level data (default = 1 for national).\n seed: Random seed.\n\n Returns:\n The simulated media, extra features, target and costs.\n \"\"\"\n if data_size < 1 or n_media_channels < 1 or n_extra_features < 1:\n raise ValueError(\n \"Data size, n_media_channels and n_extra_features must be greater than\"\n \" 0. Please check the values introduced are greater than zero.\")\n data_offset = int(data_size * 0.2)\n data_size += data_offset\n key = random.PRNGKey(seed)\n sub_keys = random.split(key=key, num=7)\n media_data = random.normal(key=sub_keys[0],\n shape=(data_size, n_media_channels)) * 1.5 + 20\n\n extra_features = random.normal(key=sub_keys[1],\n shape=(data_size, n_extra_features)) + 5\n # Reduce the costs to make ROI realistic.\n costs = media_data[data_offset:].sum(axis=0) * .1\n\n seasonality = media_transforms.calculate_seasonality(\n number_periods=data_size,\n degrees=2,\n frequency=52,\n gamma_seasonality=1)\n target_noise = random.normal(key=sub_keys[2], shape=(data_size,)) + 3\n\n # media_data_transformed = media_transforms.adstock(media_data)\n media_data_transformed = media_transforms.carryover(\n data=media_data,\n ad_effect_retention_rate=jnp.full((n_media_channels,), fill_value=.5),\n peak_effect_delay=jnp.full((n_media_channels,), fill_value=1.))\n beta_media = random.normal(key=sub_keys[3], shape=(n_media_channels,)) + 1\n beta_extra_features = random.normal(key=sub_keys[4],\n shape=(n_extra_features,))\n # There is no trend to keep this very simple.\n target = 15 + seasonality + media_data_transformed.dot(\n beta_media) + extra_features.dot(beta_extra_features) + target_noise\n\n logging.info(\"Correlation between transformed media and target\")\n logging.info([\n np.corrcoef(target[data_offset:], media_data_transformed[data_offset:,\n i])[0, 1]\n for i in range(n_media_channels)\n ])\n\n logging.info(\"True ROI for media channels\")\n logging.info([\n sum(media_data_transformed[data_offset:, i] * beta_media[i]) / costs[i]\n for i in range(n_media_channels)\n ])\n\n if geos > 1:\n # Distribute national data to geo and add some more noise.\n weights = random.uniform(key=sub_keys[5], shape=(1, geos))\n weights /= sum(weights)\n target_noise = random.normal(key=sub_keys[6], shape=(data_size, geos)) * .5\n target = target[:, np.newaxis].dot(weights) + target_noise\n media_data = media_data[:, :, np.newaxis].dot(weights)\n extra_features = extra_features[:, :, np.newaxis].dot(weights)\n\n return (media_data[data_offset:], extra_features[data_offset:],\n target[data_offset:], costs)\n\n\ndef _split_array_into_list(\n dataframe: pd.DataFrame,\n split_level_feature: str,\n features: List[str],\n national_model_flag: bool = True) -> List[np.ndarray]:\n \"\"\"Splits data frame into list of jax arrays.\n\n Args:\n dataframe: Dataframe with all the modeling feature.\n split_level_feature: Feature that will be used to split.\n features: List of feature to export 
from data frame.\n national_model_flag: Whether the data frame is used for national model.\n\n Returns:\n List of jax arrays.\n \"\"\"\n split_level = dataframe[split_level_feature].unique()\n array_list_by_level = [\n dataframe.loc[dataframe[split_level_feature] == level, features].values.T\n for level in split_level\n ]\n feature_array = jnp.stack(array_list_by_level)\n if national_model_flag:\n feature_array = jnp.squeeze(feature_array, axis=2)\n return feature_array\n\n\ndef dataframe_to_jax(\n dataframe: pd.DataFrame,\n media_features: List[str],\n extra_features: List[str],\n date_feature: str,\n target: str,\n geo_feature: Optional[str] = None,\n cost_features: Optional[List[str]] = None\n ) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:\n \"\"\"Converts pandas dataframe to right data format for media mix model.\n\n This function's goal is to convert dataframe which is most familar with data\n scientists to jax arrays to help the users who are not familar with array to\n use the lightweight MMM library easier.\n\n Args:\n dataframe: Dataframe with geo, KPI, media and non-media features.\n media_features: List of media feature names.\n extra_features: List of non media feature names.\n date_feature: Date feature name.\n target: Target variables name.\n geo_feature: Geo feature name and it is optional if the data is at national\n level.\n cost_features: List of media cost variables and it is optional if user\n use actual media cost as their media features in the model.\n\n Returns:\n Media, extra features, target and costs arrays.\n\n Raises:\n ValueError: If each geo has unequal number of weeks or there is only one\n value in the geo feature.\n \"\"\"\n if geo_feature is not None:\n if dataframe[geo_feature].nunique() == 1:\n raise ValueError(\n \"Geo feature has at least two geos or keep default for national model\"\n )\n count_by_geo = dataframe.groupby(\n geo_feature)[date_feature].count().reset_index()\n unique_date_count = count_by_geo[date_feature].nunique()\n if unique_date_count != 1:\n raise ValueError(\"Not all the geos have same number of weeks.\")\n national_model_flag = False\n features_to_sort = [date_feature, geo_feature]\n else:\n national_model_flag = True\n features_to_sort = [date_feature]\n\n df_sorted = dataframe.sort_values(by=features_to_sort)\n media_features_data = _split_array_into_list(\n dataframe=df_sorted,\n split_level_feature=date_feature,\n features=media_features,\n national_model_flag=national_model_flag)\n\n extra_features_data = _split_array_into_list(\n dataframe=df_sorted,\n split_level_feature=date_feature,\n features=extra_features,\n national_model_flag=national_model_flag)\n\n target_data = _split_array_into_list(\n dataframe=df_sorted,\n split_level_feature=date_feature,\n features=[target],\n national_model_flag=national_model_flag)\n target_data = jnp.squeeze(target_data)\n\n if cost_features:\n cost_data = jnp.dot(\n jnp.full(len(dataframe), 1), dataframe[cost_features].values)\n else:\n cost_data = jnp.dot(\n jnp.full(len(dataframe), 1), dataframe[media_features].values)\n return (media_features_data, extra_features_data, target_data, cost_data)# jax-ndarray\n\n\ndef get_halfnormal_mean_from_scale(scale: float) -> float:\n \"\"\"Returns the mean of the half-normal distribition.\"\"\"\n # https://en.wikipedia.org/wiki/Half-normal_distribution\n return scale * np.sqrt(2) / np.sqrt(np.pi)\n\n\ndef get_halfnormal_scale_from_mean(mean: float) -> float:\n \"\"\"Returns the scale of the half-normal distribution.\"\"\"\n # 
https://en.wikipedia.org/wiki/Half-normal_distribution\n return mean * np.sqrt(np.pi) / np.sqrt(2)\n\n\ndef get_beta_params_from_mu_sigma(mu: float,\n sigma: float,\n bracket: Tuple[float, float] = (.5, 100.)\n ) -> Tuple[float, float]:\n \"\"\"Deterministically estimates (a, b) from (mu, sigma) of a beta variable.\n\n https://en.wikipedia.org/wiki/Beta_distribution\n\n Args:\n mu: The sample mean of the beta distributed variable.\n sigma: The sample standard deviation of the beta distributed variable.\n bracket: Search bracket for b.\n\n Returns:\n Tuple of the (a, b) parameters.\n \"\"\"\n # Assume a = 1 to find b.", "metadata": {"task_id": "google--lightweight_mmm/44", "ground_truth": " def _f(x):\n return x ** 2 + 4 * x + 5 + 2 / x - 1 / sigma ** 2\n b = optimize.root_scalar(_f, bracket=bracket, method=\"brentq\").root\n # Given b, now find a better a.\n a = b / (1 / mu - 1)\n return a, b\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "utils.py"], "context_start_lineno": 0, "lineno": 301, "function_name": "get_beta_params_from_mu_sigma"}, "groundtruth": " def _f(x):\n return x ** 2 + 4 * x + 5 + 2 / x - 1 / sigma ** 2\n b = optimize.root_scalar(_f, bracket=bracket, method=\"brentq\").root\n # Given b, now find a better a.\n a = b / (1 / mu - 1)\n return a, b\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Set of utilities for LightweighMMM package.\"\"\"\nimport pickle\nimport time\nfrom typing import Any, List, Optional, Tuple\n\nfrom absl import logging\nfrom jax import random\nimport jax.numpy as jnp\nimport numpy as np\nimport pandas as pd\nfrom scipy import interpolate\nfrom scipy import optimize\nfrom scipy import spatial\nfrom scipy import stats\nfrom tensorflow.io import gfile\n\nfrom lightweight_mmm import media_transforms\n\n\ndef save_model(\n media_mix_model: Any,\n file_path: str\n ) -> None:\n \"\"\"Saves the given model in the given path.\n\n Args:\n media_mix_model: Model to save on disk.\n file_path: File path where the model should be placed.\n \"\"\"\n with gfile.GFile(file_path, \"wb\") as file:\n pickle.dump(obj=media_mix_model, file=file)\n\n\ndef load_model(file_path: str) -> Any:\n \"\"\"Loads a model given a string path.\n\n Args:\n file_path: Path of the file containing the model.\n\n Returns:\n The LightweightMMM object that was stored in the given path.\n \"\"\"\n with gfile.GFile(file_path, \"rb\") as file:\n media_mix_model = pickle.load(file=file)\n\n for attr in dir(media_mix_model):\n if attr.startswith(\"__\"):\n continue\n attr_value = getattr(media_mix_model, attr)\n if isinstance(attr_value, np.ndarray):\n setattr(media_mix_model, attr, jnp.array(attr_value))\n\n return media_mix_model\n\n\ndef get_time_seed() -> int:\n \"\"\"Generates an integer using the last decimals of time.time().\n\n Returns:\n Integer to be used as seed.\n \"\"\"\n # time.time() has the following format: 1645174953.0429401\n return int(str(time.time()).split(\".\")[1])\n\n\ndef simulate_dummy_data(\n 
data_size: int,\n n_media_channels: int,\n n_extra_features: int,\n geos: int = 1,\n seed: int = 5\n ) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:\n \"\"\"Simulates dummy data needed for media mix modelling.\n\n This function's goal is to be super simple and not have many parameters,\n although it does not generate a fully realistic dataset is only meant to be\n used for demos/tutorial purposes. Uses carryover for lagging but has no\n saturation and no trend.\n\n The data simulated includes the media data, extra features, a target/KPI and\n costs.\n\n Args:\n data_size: Number of rows to generate.\n n_media_channels: Number of media channels to generate.\n n_extra_features: Number of extra features to generate.\n geos: Number of geos for geo level data (default = 1 for national).\n seed: Random seed.\n\n Returns:\n The simulated media, extra features, target and costs.\n \"\"\"\n if data_size < 1 or n_media_channels < 1 or n_extra_features < 1:\n raise ValueError(\n \"Data size, n_media_channels and n_extra_features must be greater than\"\n \" 0. Please check the values introduced are greater than zero.\")\n data_offset = int(data_size * 0.2)\n data_size += data_offset\n key = random.PRNGKey(seed)\n sub_keys = random.split(key=key, num=7)\n media_data = random.normal(key=sub_keys[0],\n shape=(data_size, n_media_channels)) * 1.5 + 20\n\n extra_features = random.normal(key=sub_keys[1],\n shape=(data_size, n_extra_features)) + 5\n # Reduce the costs to make ROI realistic.\n costs = media_data[data_offset:].sum(axis=0) * .1\n\n seasonality = media_transforms.calculate_seasonality(\n number_periods=data_size,\n degrees=2,\n frequency=52,\n gamma_seasonality=1)\n target_noise = random.normal(key=sub_keys[2], shape=(data_size,)) + 3\n\n # media_data_transformed = media_transforms.adstock(media_data)\n media_data_transformed = media_transforms.carryover(\n data=media_data,\n ad_effect_retention_rate=jnp.full((n_media_channels,), fill_value=.5),\n peak_effect_delay=jnp.full((n_media_channels,), fill_value=1.))\n beta_media = random.normal(key=sub_keys[3], shape=(n_media_channels,)) + 1\n beta_extra_features = random.normal(key=sub_keys[4],\n shape=(n_extra_features,))\n # There is no trend to keep this very simple.\n target = 15 + seasonality + media_data_transformed.dot(\n beta_media) + extra_features.dot(beta_extra_features) + target_noise\n\n logging.info(\"Correlation between transformed media and target\")\n logging.info([\n np.corrcoef(target[data_offset:], media_data_transformed[data_offset:,\n i])[0, 1]\n for i in range(n_media_channels)\n ])\n\n logging.info(\"True ROI for media channels\")\n logging.info([\n sum(media_data_transformed[data_offset:, i] * beta_media[i]) / costs[i]\n for i in range(n_media_channels)\n ])\n\n if geos > 1:\n # Distribute national data to geo and add some more noise.\n weights = random.uniform(key=sub_keys[5], shape=(1, geos))\n weights /= sum(weights)\n target_noise = random.normal(key=sub_keys[6], shape=(data_size, geos)) * .5\n target = target[:, np.newaxis].dot(weights) + target_noise\n media_data = media_data[:, :, np.newaxis].dot(weights)\n extra_features = extra_features[:, :, np.newaxis].dot(weights)\n\n return (media_data[data_offset:], extra_features[data_offset:],\n target[data_offset:], costs)\n\n\ndef _split_array_into_list(\n dataframe: pd.DataFrame,\n split_level_feature: str,\n features: List[str],\n national_model_flag: bool = True) -> List[np.ndarray]:\n \"\"\"Splits data frame into list of jax arrays.\n\n Args:\n dataframe: 
Dataframe with all the modeling feature.\n split_level_feature: Feature that will be used to split.\n features: List of feature to export from data frame.\n national_model_flag: Whether the data frame is used for national model.\n\n Returns:\n List of jax arrays.\n \"\"\"\n split_level = dataframe[split_level_feature].unique()\n array_list_by_level = [\n dataframe.loc[dataframe[split_level_feature] == level, features].values.T\n for level in split_level\n ]\n feature_array = jnp.stack(array_list_by_level)\n if national_model_flag:\n feature_array = jnp.squeeze(feature_array, axis=2)\n return feature_array\n\n\ndef dataframe_to_jax(\n dataframe: pd.DataFrame,\n media_features: List[str],\n extra_features: List[str],\n date_feature: str,\n target: str,\n geo_feature: Optional[str] = None,\n cost_features: Optional[List[str]] = None\n ) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:\n \"\"\"Converts pandas dataframe to right data format for media mix model.\n\n This function's goal is to convert dataframe which is most familar with data\n scientists to jax arrays to help the users who are not familar with array to\n use the lightweight MMM library easier.\n\n Args:\n dataframe: Dataframe with geo, KPI, media and non-media features.\n media_features: List of media feature names.\n extra_features: List of non media feature names.\n date_feature: Date feature name.\n target: Target variables name.\n geo_feature: Geo feature name and it is optional if the data is at national\n level.\n cost_features: List of media cost variables and it is optional if user\n use actual media cost as their media features in the model.\n\n Returns:\n Media, extra features, target and costs arrays.\n\n Raises:\n ValueError: If each geo has unequal number of weeks or there is only one\n value in the geo feature.\n \"\"\"\n if geo_feature is not None:\n if dataframe[geo_feature].nunique() == 1:\n raise ValueError(\n \"Geo feature has at least two geos or keep default for national model\"\n )\n count_by_geo = dataframe.groupby(\n geo_feature)[date_feature].count().reset_index()\n unique_date_count = count_by_geo[date_feature].nunique()\n if unique_date_count != 1:\n raise ValueError(\"Not all the geos have same number of weeks.\")\n national_model_flag = False\n features_to_sort = [date_feature, geo_feature]\n else:\n national_model_flag = True\n features_to_sort = [date_feature]\n\n df_sorted = dataframe.sort_values(by=features_to_sort)\n media_features_data = _split_array_into_list(\n dataframe=df_sorted,\n split_level_feature=date_feature,\n features=media_features,\n national_model_flag=national_model_flag)\n\n extra_features_data = _split_array_into_list(\n dataframe=df_sorted,\n split_level_feature=date_feature,\n features=extra_features,\n national_model_flag=national_model_flag)\n\n target_data = _split_array_into_list(\n dataframe=df_sorted,\n split_level_feature=date_feature,\n features=[target],\n national_model_flag=national_model_flag)\n target_data = jnp.squeeze(target_data)\n\n if cost_features:\n cost_data = jnp.dot(\n jnp.full(len(dataframe), 1), dataframe[cost_features].values)\n else:\n cost_data = jnp.dot(\n jnp.full(len(dataframe), 1), dataframe[media_features].values)\n return (media_features_data, extra_features_data, target_data, cost_data)# jax-ndarray\n\n\ndef get_halfnormal_mean_from_scale(scale: float) -> float:\n \"\"\"Returns the mean of the half-normal distribition.\"\"\"\n # https://en.wikipedia.org/wiki/Half-normal_distribution\n return scale * np.sqrt(2) / 
np.sqrt(np.pi)\n\n\ndef get_halfnormal_scale_from_mean(mean: float) -> float:\n \"\"\"Returns the scale of the half-normal distribution.\"\"\"\n # https://en.wikipedia.org/wiki/Half-normal_distribution\n return mean * np.sqrt(np.pi) / np.sqrt(2)\n\n\ndef get_beta_params_from_mu_sigma(mu: float,\n sigma: float,\n bracket: Tuple[float, float] = (.5, 100.)\n ) -> Tuple[float, float]:\n \"\"\"Deterministically estimates (a, b) from (mu, sigma) of a beta variable.\n\n https://en.wikipedia.org/wiki/Beta_distribution\n\n Args:\n mu: The sample mean of the beta distributed variable.\n sigma: The sample standard deviation of the beta distributed variable.\n bracket: Search bracket for b.\n\n Returns:\n Tuple of the (a, b) parameters.\n \"\"\"\n # Assume a = 1 to find b.\n def _f(x):\n return x ** 2 + 4 * x + 5 + 2 / x - 1 / sigma ** 2\n b = optimize.root_scalar(_f, bracket=bracket, method=\"brentq\").root\n # Given b, now find a better a.\n a = b / (1 / mu - 1)\n return a, b\n\n\ndef _estimate_pdf(p: jnp.ndarray, x: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Estimates smooth pdf with Gaussian kernel.\n\n Args:\n p: Samples.\n x: The continuous x space (sorted).\n\n Returns:\n A density vector.\n \"\"\"\n density = sum(stats.norm(xi).pdf(x) for xi in p)\n return density / density.sum()\n\n\ndef _pmf(p: jnp.ndarray, x: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Estimates discrete pmf.\n\n Args:\n p: Samples.\n x: The discrete x space (sorted).\n\n Returns:\n A pmf vector.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/45", "ground_truth": " p_cdf = jnp.array([jnp.sum(p <= x[i]) for i in range(len(x))])\n p_pmf = np.concatenate([[p_cdf[0]], jnp.diff(p_cdf)])\n return p_pmf / p_pmf.sum()\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "utils.py"], "context_start_lineno": 0, "lineno": 333, "function_name": "_pmf"}, "groundtruth": " p_cdf = jnp.array([jnp.sum(p <= x[i]) for i in range(len(x))])\n p_pmf = np.concatenate([[p_cdf[0]], jnp.diff(p_cdf)])\n return p_pmf / p_pmf.sum()\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Set of utilities for LightweighMMM package.\"\"\"\nimport pickle\nimport time\nfrom typing import Any, List, Optional, Tuple\n\nfrom absl import logging\nfrom jax import random\nimport jax.numpy as jnp\nimport numpy as np\nimport pandas as pd\nfrom scipy import interpolate\nfrom scipy import optimize\nfrom scipy import spatial\nfrom scipy import stats\nfrom tensorflow.io import gfile\n\nfrom lightweight_mmm import media_transforms\n\n\ndef save_model(\n media_mix_model: Any,\n file_path: str\n ) -> None:\n \"\"\"Saves the given model in the given path.\n\n Args:\n media_mix_model: Model to save on disk.\n file_path: File path where the model should be placed.\n \"\"\"\n with gfile.GFile(file_path, \"wb\") as file:\n pickle.dump(obj=media_mix_model, file=file)\n\n\ndef load_model(file_path: str) -> Any:\n \"\"\"Loads a model given a string path.\n\n Args:\n file_path: Path of the file containing the model.\n\n 
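Note: attributes stored as numpy arrays are converted back to jax arrays when the model is loaded (see the conversion loop below).\n\n  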
Returns:\n The LightweightMMM object that was stored in the given path.\n \"\"\"\n with gfile.GFile(file_path, \"rb\") as file:\n media_mix_model = pickle.load(file=file)\n\n for attr in dir(media_mix_model):\n if attr.startswith(\"__\"):\n continue\n attr_value = getattr(media_mix_model, attr)\n if isinstance(attr_value, np.ndarray):\n setattr(media_mix_model, attr, jnp.array(attr_value))\n\n return media_mix_model\n\n\ndef get_time_seed() -> int:\n \"\"\"Generates an integer using the last decimals of time.time().\n\n Returns:\n Integer to be used as seed.\n \"\"\"\n # time.time() has the following format: 1645174953.0429401\n return int(str(time.time()).split(\".\")[1])\n\n\ndef simulate_dummy_data(\n data_size: int,\n n_media_channels: int,\n n_extra_features: int,\n geos: int = 1,\n seed: int = 5\n ) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:\n \"\"\"Simulates dummy data needed for media mix modelling.\n\n This function's goal is to be super simple and not have many parameters,\n although it does not generate a fully realistic dataset is only meant to be\n used for demos/tutorial purposes. Uses carryover for lagging but has no\n saturation and no trend.\n\n The data simulated includes the media data, extra features, a target/KPI and\n costs.\n\n Args:\n data_size: Number of rows to generate.\n n_media_channels: Number of media channels to generate.\n n_extra_features: Number of extra features to generate.\n geos: Number of geos for geo level data (default = 1 for national).\n seed: Random seed.\n\n Returns:\n The simulated media, extra features, target and costs.\n \"\"\"\n if data_size < 1 or n_media_channels < 1 or n_extra_features < 1:\n raise ValueError(\n \"Data size, n_media_channels and n_extra_features must be greater than\"\n \" 0. 
Please check the values introduced are greater than zero.\")\n data_offset = int(data_size * 0.2)\n data_size += data_offset\n key = random.PRNGKey(seed)\n sub_keys = random.split(key=key, num=7)\n media_data = random.normal(key=sub_keys[0],\n shape=(data_size, n_media_channels)) * 1.5 + 20\n\n extra_features = random.normal(key=sub_keys[1],\n shape=(data_size, n_extra_features)) + 5\n # Reduce the costs to make ROI realistic.\n costs = media_data[data_offset:].sum(axis=0) * .1\n\n seasonality = media_transforms.calculate_seasonality(\n number_periods=data_size,\n degrees=2,\n frequency=52,\n gamma_seasonality=1)\n target_noise = random.normal(key=sub_keys[2], shape=(data_size,)) + 3\n\n # media_data_transformed = media_transforms.adstock(media_data)\n media_data_transformed = media_transforms.carryover(\n data=media_data,\n ad_effect_retention_rate=jnp.full((n_media_channels,), fill_value=.5),\n peak_effect_delay=jnp.full((n_media_channels,), fill_value=1.))\n beta_media = random.normal(key=sub_keys[3], shape=(n_media_channels,)) + 1\n beta_extra_features = random.normal(key=sub_keys[4],\n shape=(n_extra_features,))\n # There is no trend to keep this very simple.\n target = 15 + seasonality + media_data_transformed.dot(\n beta_media) + extra_features.dot(beta_extra_features) + target_noise\n\n logging.info(\"Correlation between transformed media and target\")\n logging.info([\n np.corrcoef(target[data_offset:], media_data_transformed[data_offset:,\n i])[0, 1]\n for i in range(n_media_channels)\n ])\n\n logging.info(\"True ROI for media channels\")\n logging.info([\n sum(media_data_transformed[data_offset:, i] * beta_media[i]) / costs[i]\n for i in range(n_media_channels)\n ])\n\n if geos > 1:\n # Distribute national data to geo and add some more noise.\n weights = random.uniform(key=sub_keys[5], shape=(1, geos))\n weights /= sum(weights)\n target_noise = random.normal(key=sub_keys[6], shape=(data_size, geos)) * .5\n target = target[:, np.newaxis].dot(weights) + target_noise\n media_data = media_data[:, :, np.newaxis].dot(weights)\n extra_features = extra_features[:, :, np.newaxis].dot(weights)\n\n return (media_data[data_offset:], extra_features[data_offset:],\n target[data_offset:], costs)\n\n\ndef _split_array_into_list(\n dataframe: pd.DataFrame,\n split_level_feature: str,\n features: List[str],\n national_model_flag: bool = True) -> List[np.ndarray]:\n \"\"\"Splits data frame into list of jax arrays.\n\n Args:\n dataframe: Dataframe with all the modeling feature.\n split_level_feature: Feature that will be used to split.\n features: List of feature to export from data frame.\n national_model_flag: Whether the data frame is used for national model.\n\n Returns:\n List of jax arrays.\n \"\"\"\n split_level = dataframe[split_level_feature].unique()\n array_list_by_level = [\n dataframe.loc[dataframe[split_level_feature] == level, features].values.T\n for level in split_level\n ]\n feature_array = jnp.stack(array_list_by_level)\n if national_model_flag:\n feature_array = jnp.squeeze(feature_array, axis=2)\n return feature_array\n\n\ndef dataframe_to_jax(\n dataframe: pd.DataFrame,\n media_features: List[str],\n extra_features: List[str],\n date_feature: str,\n target: str,\n geo_feature: Optional[str] = None,\n cost_features: Optional[List[str]] = None\n ) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:\n \"\"\"Converts pandas dataframe to right data format for media mix model.\n\n This function's goal is to convert dataframe which is most familar with data\n scientists to 
jax arrays to help the users who are not familar with array to\n use the lightweight MMM library easier.\n\n Args:\n dataframe: Dataframe with geo, KPI, media and non-media features.\n media_features: List of media feature names.\n extra_features: List of non media feature names.\n date_feature: Date feature name.\n target: Target variables name.\n geo_feature: Geo feature name and it is optional if the data is at national\n level.\n cost_features: List of media cost variables and it is optional if user\n use actual media cost as their media features in the model.\n\n Returns:\n Media, extra features, target and costs arrays.\n\n Raises:\n ValueError: If each geo has unequal number of weeks or there is only one\n value in the geo feature.\n \"\"\"\n if geo_feature is not None:\n if dataframe[geo_feature].nunique() == 1:\n raise ValueError(\n \"Geo feature has at least two geos or keep default for national model\"\n )\n count_by_geo = dataframe.groupby(\n geo_feature)[date_feature].count().reset_index()\n unique_date_count = count_by_geo[date_feature].nunique()\n if unique_date_count != 1:\n raise ValueError(\"Not all the geos have same number of weeks.\")\n national_model_flag = False\n features_to_sort = [date_feature, geo_feature]\n else:\n national_model_flag = True\n features_to_sort = [date_feature]\n\n df_sorted = dataframe.sort_values(by=features_to_sort)\n media_features_data = _split_array_into_list(\n dataframe=df_sorted,\n split_level_feature=date_feature,\n features=media_features,\n national_model_flag=national_model_flag)\n\n extra_features_data = _split_array_into_list(\n dataframe=df_sorted,\n split_level_feature=date_feature,\n features=extra_features,\n national_model_flag=national_model_flag)\n\n target_data = _split_array_into_list(\n dataframe=df_sorted,\n split_level_feature=date_feature,\n features=[target],\n national_model_flag=national_model_flag)\n target_data = jnp.squeeze(target_data)\n\n if cost_features:\n cost_data = jnp.dot(\n jnp.full(len(dataframe), 1), dataframe[cost_features].values)\n else:\n cost_data = jnp.dot(\n jnp.full(len(dataframe), 1), dataframe[media_features].values)\n return (media_features_data, extra_features_data, target_data, cost_data)# jax-ndarray\n\n\ndef get_halfnormal_mean_from_scale(scale: float) -> float:\n \"\"\"Returns the mean of the half-normal distribition.\"\"\"\n # https://en.wikipedia.org/wiki/Half-normal_distribution\n return scale * np.sqrt(2) / np.sqrt(np.pi)\n\n\ndef get_halfnormal_scale_from_mean(mean: float) -> float:\n \"\"\"Returns the scale of the half-normal distribution.\"\"\"\n # https://en.wikipedia.org/wiki/Half-normal_distribution\n return mean * np.sqrt(np.pi) / np.sqrt(2)\n\n\ndef get_beta_params_from_mu_sigma(mu: float,\n sigma: float,\n bracket: Tuple[float, float] = (.5, 100.)\n ) -> Tuple[float, float]:\n \"\"\"Deterministically estimates (a, b) from (mu, sigma) of a beta variable.\n\n https://en.wikipedia.org/wiki/Beta_distribution\n\n Args:\n mu: The sample mean of the beta distributed variable.\n sigma: The sample standard deviation of the beta distributed variable.\n bracket: Search bracket for b.\n\n Returns:\n Tuple of the (a, b) parameters.\n \"\"\"\n # Assume a = 1 to find b.\n def _f(x):\n return x ** 2 + 4 * x + 5 + 2 / x - 1 / sigma ** 2\n b = optimize.root_scalar(_f, bracket=bracket, method=\"brentq\").root\n # Given b, now find a better a.\n a = b / (1 / mu - 1)\n return a, b\n\n\ndef _estimate_pdf(p: jnp.ndarray, x: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Estimates smooth pdf with Gaussian 
kernel.\n\n Args:\n p: Samples.\n x: The continuous x space (sorted).\n\n Returns:\n A density vector.\n \"\"\"\n density = sum(stats.norm(xi).pdf(x) for xi in p)\n return density / density.sum()\n\n\ndef _pmf(p: jnp.ndarray, x: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Estimates discrete pmf.\n\n Args:\n p: Samples.\n x: The discrete x space (sorted).\n\n Returns:\n A pmf vector.\n \"\"\"\n p_cdf = jnp.array([jnp.sum(p <= x[i]) for i in range(len(x))])\n p_pmf = np.concatenate([[p_cdf[0]], jnp.diff(p_cdf)])\n return p_pmf / p_pmf.sum()\n\n\ndef distance_pior_posterior(p: jnp.ndarray, q: jnp.ndarray, method: str = \"KS\",\n discrete: bool = True) -> float:\n \"\"\"Quantifies the distance between two distributions.\n\n Note we do not use KL divergence because it's not defined when a probability\n is 0.\n\n https://en.wikipedia.org/wiki/Hellinger_distance\n\n Args:\n p: Samples for distribution 1.\n q: Samples for distribution 2.\n method: We can have four methods: KS, Hellinger, JS and min.\n discrete: Whether input data is discrete or continuous.\n\n Returns:\n The distance metric (between 0 and 1).\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/46", "ground_truth": " if method == \"KS\":\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ks_2samp.html\n return stats.ks_2samp(p, q).statistic\n elif method in [\"Hellinger\", \"JS\", \"min\"]:\n if discrete:\n x = jnp.unique(jnp.concatenate((p, q)))\n p_pdf = _pmf(p, x)\n q_pdf = _pmf(q, x)\n else:\n minx, maxx = min(p.min(), q.min()), max(p.max(), q.max())\n x = np.linspace(minx, maxx, 100)\n p_pdf = _estimate_pdf(p, x)\n q_pdf = _estimate_pdf(q, x)\n if method == \"Hellinger\":\n return np.sqrt(jnp.sum((np.sqrt(p_pdf) - np.sqrt(q_pdf)) ** 2)) / np.sqrt(2)\n elif method == \"JS\":\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.jensenshannon.html\n return spatial.distance.jensenshannon(p_pdf, q_pdf)\n else:\n return 1 - np.minimum(p_pdf, q_pdf).sum()\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "utils.py"], "context_start_lineno": 0, "lineno": 357, "function_name": "distance_pior_posterior"}, "groundtruth": " if method == \"KS\":\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ks_2samp.html\n return stats.ks_2samp(p, q).statistic\n elif method in [\"Hellinger\", \"JS\", \"min\"]:\n if discrete:\n x = jnp.unique(jnp.concatenate((p, q)))\n p_pdf = _pmf(p, x)\n q_pdf = _pmf(q, x)\n else:\n minx, maxx = min(p.min(), q.min()), max(p.max(), q.max())\n x = np.linspace(minx, maxx, 100)\n p_pdf = _estimate_pdf(p, x)\n q_pdf = _estimate_pdf(q, x)\n if method == \"Hellinger\":\n return np.sqrt(jnp.sum((np.sqrt(p_pdf) - np.sqrt(q_pdf)) ** 2)) / np.sqrt(2)\n elif method == \"JS\":\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.jensenshannon.html\n return spatial.distance.jensenshannon(p_pdf, q_pdf)\n else:\n return 1 - np.minimum(p_pdf, q_pdf).sum()\n"} +{"prompt": "\ndef save_model(\n media_mix_model: Any,\n file_path: str\n ) -> None:\n \"\"\"Saves the given model in the given path.\n\n Args:\n media_mix_model: Model to save on disk.\n file_path: File path where the model should be placed.\n \"\"\"\n with gfile.GFile(file_path, \"wb\") as file:\n pickle.dump(obj=media_mix_model, file=file)\n\n\ndef load_model(file_path: str) -> Any:\n \"\"\"Loads a model given a string path.\n\n Args:\n file_path: Path of the file containing the model.\n\n Returns:\n The LightweightMMM object that was stored in the given path.\n \"\"\"\n 
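# Unpickle the model object; numpy array attributes are cast back to jax arrays in the loop below.\n  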
with gfile.GFile(file_path, \"rb\") as file:\n media_mix_model = pickle.load(file=file)\n\n for attr in dir(media_mix_model):\n if attr.startswith(\"__\"):\n continue\n attr_value = getattr(media_mix_model, attr)\n if isinstance(attr_value, np.ndarray):\n setattr(media_mix_model, attr, jnp.array(attr_value))\n\n return media_mix_model\n\n\ndef get_time_seed() -> int:\n \"\"\"Generates an integer using the last decimals of time.time().\n\n Returns:\n Integer to be used as seed.\n \"\"\"\n # time.time() has the following format: 1645174953.0429401\n return int(str(time.time()).split(\".\")[1])\n\n\ndef simulate_dummy_data(\n data_size: int,\n n_media_channels: int,\n n_extra_features: int,\n geos: int = 1,\n seed: int = 5\n ) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:\n \"\"\"Simulates dummy data needed for media mix modelling.\n\n This function's goal is to be super simple and not have many parameters,\n although it does not generate a fully realistic dataset is only meant to be\n used for demos/tutorial purposes. Uses carryover for lagging but has no\n saturation and no trend.\n\n The data simulated includes the media data, extra features, a target/KPI and\n costs.\n\n Args:\n data_size: Number of rows to generate.\n n_media_channels: Number of media channels to generate.\n n_extra_features: Number of extra features to generate.\n geos: Number of geos for geo level data (default = 1 for national).\n seed: Random seed.\n\n Returns:\n The simulated media, extra features, target and costs.\n \"\"\"\n if data_size < 1 or n_media_channels < 1 or n_extra_features < 1:\n raise ValueError(\n \"Data size, n_media_channels and n_extra_features must be greater than\"\n \" 0. Please check the values introduced are greater than zero.\")\n data_offset = int(data_size * 0.2)\n data_size += data_offset\n key = random.PRNGKey(seed)\n sub_keys = random.split(key=key, num=7)\n media_data = random.normal(key=sub_keys[0],\n shape=(data_size, n_media_channels)) * 1.5 + 20\n\n extra_features = random.normal(key=sub_keys[1],\n shape=(data_size, n_extra_features)) + 5\n # Reduce the costs to make ROI realistic.\n costs = media_data[data_offset:].sum(axis=0) * .1\n\n seasonality = media_transforms.calculate_seasonality(\n number_periods=data_size,\n degrees=2,\n frequency=52,\n gamma_seasonality=1)\n target_noise = random.normal(key=sub_keys[2], shape=(data_size,)) + 3\n\n # media_data_transformed = media_transforms.adstock(media_data)\n media_data_transformed = media_transforms.carryover(\n data=media_data,\n ad_effect_retention_rate=jnp.full((n_media_channels,), fill_value=.5),\n peak_effect_delay=jnp.full((n_media_channels,), fill_value=1.))\n beta_media = random.normal(key=sub_keys[3], shape=(n_media_channels,)) + 1\n beta_extra_features = random.normal(key=sub_keys[4],\n shape=(n_extra_features,))\n # There is no trend to keep this very simple.\n target = 15 + seasonality + media_data_transformed.dot(\n beta_media) + extra_features.dot(beta_extra_features) + target_noise\n\n logging.info(\"Correlation between transformed media and target\")\n logging.info([\n np.corrcoef(target[data_offset:], media_data_transformed[data_offset:,\n i])[0, 1]\n for i in range(n_media_channels)\n ])\n\n logging.info(\"True ROI for media channels\")\n logging.info([\n sum(media_data_transformed[data_offset:, i] * beta_media[i]) / costs[i]\n for i in range(n_media_channels)\n ])\n\n if geos > 1:\n # Distribute national data to geo and add some more noise.\n weights = random.uniform(key=sub_keys[5], shape=(1, 
geos))\n weights /= sum(weights)\n target_noise = random.normal(key=sub_keys[6], shape=(data_size, geos)) * .5\n target = target[:, np.newaxis].dot(weights) + target_noise\n media_data = media_data[:, :, np.newaxis].dot(weights)\n extra_features = extra_features[:, :, np.newaxis].dot(weights)\n\n return (media_data[data_offset:], extra_features[data_offset:],\n target[data_offset:], costs)\n\n\ndef _split_array_into_list(\n dataframe: pd.DataFrame,\n split_level_feature: str,\n features: List[str],\n national_model_flag: bool = True) -> List[np.ndarray]:\n \"\"\"Splits data frame into list of jax arrays.\n\n Args:\n dataframe: Dataframe with all the modeling feature.\n split_level_feature: Feature that will be used to split.\n features: List of feature to export from data frame.\n national_model_flag: Whether the data frame is used for national model.\n\n Returns:\n List of jax arrays.\n \"\"\"\n split_level = dataframe[split_level_feature].unique()\n array_list_by_level = [\n dataframe.loc[dataframe[split_level_feature] == level, features].values.T\n for level in split_level\n ]\n feature_array = jnp.stack(array_list_by_level)\n if national_model_flag:\n feature_array = jnp.squeeze(feature_array, axis=2)\n return feature_array\n\n\ndef dataframe_to_jax(\n dataframe: pd.DataFrame,\n media_features: List[str],\n extra_features: List[str],\n date_feature: str,\n target: str,\n geo_feature: Optional[str] = None,\n cost_features: Optional[List[str]] = None\n ) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray, jnp.ndarray]:\n \"\"\"Converts pandas dataframe to right data format for media mix model.\n\n This function's goal is to convert dataframe which is most familar with data\n scientists to jax arrays to help the users who are not familar with array to\n use the lightweight MMM library easier.\n\n Args:\n dataframe: Dataframe with geo, KPI, media and non-media features.\n media_features: List of media feature names.\n extra_features: List of non media feature names.\n date_feature: Date feature name.\n target: Target variables name.\n geo_feature: Geo feature name and it is optional if the data is at national\n level.\n cost_features: List of media cost variables and it is optional if user\n use actual media cost as their media features in the model.\n\n Returns:\n Media, extra features, target and costs arrays.\n\n Raises:\n ValueError: If each geo has unequal number of weeks or there is only one\n value in the geo feature.\n \"\"\"\n if geo_feature is not None:\n if dataframe[geo_feature].nunique() == 1:\n raise ValueError(\n \"Geo feature has at least two geos or keep default for national model\"\n )\n count_by_geo = dataframe.groupby(\n geo_feature)[date_feature].count().reset_index()\n unique_date_count = count_by_geo[date_feature].nunique()\n if unique_date_count != 1:\n raise ValueError(\"Not all the geos have same number of weeks.\")\n national_model_flag = False\n features_to_sort = [date_feature, geo_feature]\n else:\n national_model_flag = True\n features_to_sort = [date_feature]\n\n df_sorted = dataframe.sort_values(by=features_to_sort)\n media_features_data = _split_array_into_list(\n dataframe=df_sorted,\n split_level_feature=date_feature,\n features=media_features,\n national_model_flag=national_model_flag)\n\n extra_features_data = _split_array_into_list(\n dataframe=df_sorted,\n split_level_feature=date_feature,\n features=extra_features,\n national_model_flag=national_model_flag)\n\n target_data = _split_array_into_list(\n dataframe=df_sorted,\n 
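# The target is extracted as a single-feature split and squeezed right after.\n      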
split_level_feature=date_feature,\n features=[target],\n national_model_flag=national_model_flag)\n target_data = jnp.squeeze(target_data)\n\n if cost_features:\n cost_data = jnp.dot(\n jnp.full(len(dataframe), 1), dataframe[cost_features].values)\n else:\n cost_data = jnp.dot(\n jnp.full(len(dataframe), 1), dataframe[media_features].values)\n return (media_features_data, extra_features_data, target_data, cost_data)# jax-ndarray\n\n\ndef get_halfnormal_mean_from_scale(scale: float) -> float:\n \"\"\"Returns the mean of the half-normal distribition.\"\"\"\n # https://en.wikipedia.org/wiki/Half-normal_distribution\n return scale * np.sqrt(2) / np.sqrt(np.pi)\n\n\ndef get_halfnormal_scale_from_mean(mean: float) -> float:\n \"\"\"Returns the scale of the half-normal distribution.\"\"\"\n # https://en.wikipedia.org/wiki/Half-normal_distribution\n return mean * np.sqrt(np.pi) / np.sqrt(2)\n\n\ndef get_beta_params_from_mu_sigma(mu: float,\n sigma: float,\n bracket: Tuple[float, float] = (.5, 100.)\n ) -> Tuple[float, float]:\n \"\"\"Deterministically estimates (a, b) from (mu, sigma) of a beta variable.\n\n https://en.wikipedia.org/wiki/Beta_distribution\n\n Args:\n mu: The sample mean of the beta distributed variable.\n sigma: The sample standard deviation of the beta distributed variable.\n bracket: Search bracket for b.\n\n Returns:\n Tuple of the (a, b) parameters.\n \"\"\"\n # Assume a = 1 to find b.\n def _f(x):\n return x ** 2 + 4 * x + 5 + 2 / x - 1 / sigma ** 2\n b = optimize.root_scalar(_f, bracket=bracket, method=\"brentq\").root\n # Given b, now find a better a.\n a = b / (1 / mu - 1)\n return a, b\n\n\ndef _estimate_pdf(p: jnp.ndarray, x: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Estimates smooth pdf with Gaussian kernel.\n\n Args:\n p: Samples.\n x: The continuous x space (sorted).\n\n Returns:\n A density vector.\n \"\"\"\n density = sum(stats.norm(xi).pdf(x) for xi in p)\n return density / density.sum()\n\n\ndef _pmf(p: jnp.ndarray, x: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Estimates discrete pmf.\n\n Args:\n p: Samples.\n x: The discrete x space (sorted).\n\n Returns:\n A pmf vector.\n \"\"\"\n p_cdf = jnp.array([jnp.sum(p <= x[i]) for i in range(len(x))])\n p_pmf = np.concatenate([[p_cdf[0]], jnp.diff(p_cdf)])\n return p_pmf / p_pmf.sum()\n\n\ndef distance_pior_posterior(p: jnp.ndarray, q: jnp.ndarray, method: str = \"KS\",\n discrete: bool = True) -> float:\n \"\"\"Quantifies the distance between two distributions.\n\n Note we do not use KL divergence because it's not defined when a probability\n is 0.\n\n https://en.wikipedia.org/wiki/Hellinger_distance\n\n Args:\n p: Samples for distribution 1.\n q: Samples for distribution 2.\n method: We can have four methods: KS, Hellinger, JS and min.\n discrete: Whether input data is discrete or continuous.\n\n Returns:\n The distance metric (between 0 and 1).\n \"\"\"\n\n if method == \"KS\":\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ks_2samp.html\n return stats.ks_2samp(p, q).statistic\n elif method in [\"Hellinger\", \"JS\", \"min\"]:\n if discrete:\n x = jnp.unique(jnp.concatenate((p, q)))\n p_pdf = _pmf(p, x)\n q_pdf = _pmf(q, x)\n else:\n minx, maxx = min(p.min(), q.min()), max(p.max(), q.max())\n x = np.linspace(minx, maxx, 100)\n p_pdf = _estimate_pdf(p, x)\n q_pdf = _estimate_pdf(q, x)\n if method == \"Hellinger\":\n return np.sqrt(jnp.sum((np.sqrt(p_pdf) - np.sqrt(q_pdf)) ** 2)) / np.sqrt(2)\n elif method == \"JS\":\n # 
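Jensen-Shannon distance between the two estimated densities; reference: 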
https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.jensenshannon.html\n return spatial.distance.jensenshannon(p_pdf, q_pdf)\n else:\n return 1 - np.minimum(p_pdf, q_pdf).sum()\n\n\ndef interpolate_outliers(x: jnp.ndarray,\n outlier_idx: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Overwrites outliers in x with interpolated values.\n\n Args:\n x: The original univariate variable with outliers.\n outlier_idx: Indices of the outliers in x.\n\n Returns:\n A cleaned x with outliers overwritten.\n\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/47", "ground_truth": " time_idx = jnp.arange(len(x))\n inverse_idx = jnp.array([i for i in range(len(x)) if i not in outlier_idx])\n interp_func = interpolate.interp1d(\n time_idx[inverse_idx], x[inverse_idx], kind=\"linear\")\n x = x.at[outlier_idx].set(interp_func(time_idx[outlier_idx]))\n return x\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "utils.py"], "context_start_lineno": 32, "lineno": 391, "function_name": "interpolate_outliers"}, "groundtruth": " time_idx = jnp.arange(len(x))\n inverse_idx = jnp.array([i for i in range(len(x)) if i not in outlier_idx])\n interp_func = interpolate.interp1d(\n time_idx[inverse_idx], x[inverse_idx], kind=\"linear\")\n x = x.at[outlier_idx].set(interp_func(time_idx[outlier_idx]))\n return x\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for models.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import distributions as dist\nfrom numpyro import handlers\n\nfrom lightweight_mmm import models\n\n\nclass ModelsTest(parameterized.TestCase):\n\n @parameterized.named_parameters(\n dict(testcase_name=\"one_channel\", shape=(10, 1)),\n dict(testcase_name=\"five_channel\", shape=(10, 5)),\n dict(testcase_name=\"same_channels_as_rows\", shape=(10, 10)),\n dict(testcase_name=\"geo_shape_1\", shape=(10, 10, 5)),\n dict(testcase_name=\"geo_shape_2\", shape=(10, 5, 2)),\n dict(testcase_name=\"one_channel_one_row\", shape=(1, 1)))\n def test_transform_adstock_produces_correct_output_shape(self, shape):\n\n def mock_model_function(media_data):", "metadata": {"task_id": "google--lightweight_mmm/48", "ground_truth": " numpyro.deterministic(\n \"transformed_media\",\n models.transform_adstock(media_data, custom_priors={}))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "models_test.py"], "context_start_lineno": 0, "lineno": 39, "function_name": "mock_model_function"}, "groundtruth": " numpyro.deterministic(\n \"transformed_media\",\n models.transform_adstock(media_data, custom_priors={}))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or 
agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for models.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import distributions as dist\nfrom numpyro import handlers\n\nfrom lightweight_mmm import models\n\n\nclass ModelsTest(parameterized.TestCase):\n\n @parameterized.named_parameters(\n dict(testcase_name=\"one_channel\", shape=(10, 1)),\n dict(testcase_name=\"five_channel\", shape=(10, 5)),\n dict(testcase_name=\"same_channels_as_rows\", shape=(10, 10)),\n dict(testcase_name=\"geo_shape_1\", shape=(10, 10, 5)),\n dict(testcase_name=\"geo_shape_2\", shape=(10, 5, 2)),\n dict(testcase_name=\"one_channel_one_row\", shape=(1, 1)))\n def test_transform_adstock_produces_correct_output_shape(self, shape):\n\n def mock_model_function(media_data):\n numpyro.deterministic(\n \"transformed_media\",\n models.transform_adstock(media_data, custom_priors={}))\n\n media = jnp.ones(shape)\n kernel = numpyro.infer.NUTS(model=mock_model_function)\n mcmc = numpyro.infer.MCMC(\n sampler=kernel, num_warmup=10, num_samples=10, num_chains=1)\n rng_key = jax.random.PRNGKey(0)\n\n mcmc.run(rng_key, media_data=media)\n transformed_media = mcmc.get_samples()[\"transformed_media\"].mean(axis=0)\n\n self.assertEqual(media.shape, transformed_media.shape)\n\n @parameterized.named_parameters(\n dict(testcase_name=\"one_channel\", shape=(10, 1)),\n dict(testcase_name=\"five_channel\", shape=(10, 5)),\n dict(testcase_name=\"same_channels_as_rows\", shape=(10, 10)),\n dict(testcase_name=\"geo_shape_1\", shape=(10, 10, 5)),\n dict(testcase_name=\"geo_shape_2\", shape=(10, 5, 2)),\n dict(testcase_name=\"one_channel_one_row\", shape=(1, 1)))\n def test_transform_hill_adstock_produces_correct_output_shape(self, shape):\n\n def mock_model_function(media_data):", "metadata": {"task_id": "google--lightweight_mmm/49", "ground_truth": " numpyro.deterministic(\n \"transformed_media\",\n models.transform_hill_adstock(media_data, custom_priors={}))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "models_test.py"], "context_start_lineno": 0, "lineno": 64, "function_name": "mock_model_function"}, "groundtruth": " numpyro.deterministic(\n \"transformed_media\",\n models.transform_hill_adstock(media_data, custom_priors={}))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for models.\"\"\"\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax\nimport jax.numpy as jnp\nimport numpyro\nfrom numpyro import distributions as dist\nfrom numpyro import handlers\n\nfrom lightweight_mmm import models\n\n\nclass ModelsTest(parameterized.TestCase):\n\n @parameterized.named_parameters(\n 
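# One named case per media tensor shape, covering both national and geo data.\n      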
dict(testcase_name=\"one_channel\", shape=(10, 1)),\n dict(testcase_name=\"five_channel\", shape=(10, 5)),\n dict(testcase_name=\"same_channels_as_rows\", shape=(10, 10)),\n dict(testcase_name=\"geo_shape_1\", shape=(10, 10, 5)),\n dict(testcase_name=\"geo_shape_2\", shape=(10, 5, 2)),\n dict(testcase_name=\"one_channel_one_row\", shape=(1, 1)))\n def test_transform_adstock_produces_correct_output_shape(self, shape):\n\n def mock_model_function(media_data):\n numpyro.deterministic(\n \"transformed_media\",\n models.transform_adstock(media_data, custom_priors={}))\n\n media = jnp.ones(shape)\n kernel = numpyro.infer.NUTS(model=mock_model_function)\n mcmc = numpyro.infer.MCMC(\n sampler=kernel, num_warmup=10, num_samples=10, num_chains=1)\n rng_key = jax.random.PRNGKey(0)\n\n mcmc.run(rng_key, media_data=media)\n transformed_media = mcmc.get_samples()[\"transformed_media\"].mean(axis=0)\n\n self.assertEqual(media.shape, transformed_media.shape)\n\n @parameterized.named_parameters(\n dict(testcase_name=\"one_channel\", shape=(10, 1)),\n dict(testcase_name=\"five_channel\", shape=(10, 5)),\n dict(testcase_name=\"same_channels_as_rows\", shape=(10, 10)),\n dict(testcase_name=\"geo_shape_1\", shape=(10, 10, 5)),\n dict(testcase_name=\"geo_shape_2\", shape=(10, 5, 2)),\n dict(testcase_name=\"one_channel_one_row\", shape=(1, 1)))\n def test_transform_hill_adstock_produces_correct_output_shape(self, shape):\n\n def mock_model_function(media_data):\n numpyro.deterministic(\n \"transformed_media\",\n models.transform_hill_adstock(media_data, custom_priors={}))\n\n media = jnp.ones(shape)\n kernel = numpyro.infer.NUTS(model=mock_model_function)\n mcmc = numpyro.infer.MCMC(\n sampler=kernel, num_warmup=10, num_samples=10, num_chains=1)\n rng_key = jax.random.PRNGKey(0)\n\n mcmc.run(rng_key, media_data=media)\n transformed_media = mcmc.get_samples()[\"transformed_media\"].mean(axis=0)\n\n self.assertEqual(media.shape, transformed_media.shape)\n\n @parameterized.named_parameters(\n dict(testcase_name=\"one_channel\", shape=(10, 1)),\n dict(testcase_name=\"five_channel\", shape=(10, 5)),\n dict(testcase_name=\"same_channels_as_rows\", shape=(10, 10)),\n dict(testcase_name=\"geo_shape_1\", shape=(10, 10, 5)),\n dict(testcase_name=\"geo_shape_2\", shape=(10, 5, 2)),\n dict(testcase_name=\"one_channel_one_row\", shape=(1, 1)))\n def test_transform_carryover_produces_correct_output_shape(self, shape):\n\n def mock_model_function(media_data):", "metadata": {"task_id": "google--lightweight_mmm/50", "ground_truth": " numpyro.deterministic(\n \"transformed_media\",\n models.transform_carryover(media_data, custom_priors={}))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "models_test.py"], "context_start_lineno": 0, "lineno": 89, "function_name": "mock_model_function"}, "groundtruth": " numpyro.deterministic(\n \"transformed_media\",\n models.transform_carryover(media_data, custom_priors={}))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\n\"\"\"Tests for optimize_media.\"\"\"\nfrom unittest import mock\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\n\nfrom lightweight_mmm import lightweight_mmm\nfrom lightweight_mmm import optimize_media\nfrom lightweight_mmm import preprocessing\n\n\nclass OptimizeMediaTest(parameterized.TestCase):\n\n @classmethod\n def setUpClass(cls):", "metadata": {"task_id": "google--lightweight_mmm/51", "ground_truth": " super(OptimizeMediaTest, cls).setUpClass()\n cls.national_mmm = lightweight_mmm.LightweightMMM()\n cls.national_mmm.fit(\n media=jnp.ones((50, 5)),\n target=jnp.ones(50),\n media_prior=jnp.ones(5) * 50,\n number_warmup=2,\n number_samples=2,\n number_chains=1)\n cls.geo_mmm = lightweight_mmm.LightweightMMM()\n cls.geo_mmm.fit(\n media=jnp.ones((50, 5, 3)),\n target=jnp.ones((50, 3)),\n media_prior=jnp.ones(5) * 50,\n number_warmup=2,\n number_samples=2,\n number_chains=1)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "optimize_media_test.py"], "context_start_lineno": 0, "lineno": 32, "function_name": "setUpClass"}, "groundtruth": " super(OptimizeMediaTest, cls).setUpClass()\n cls.national_mmm = lightweight_mmm.LightweightMMM()\n cls.national_mmm.fit(\n media=jnp.ones((50, 5)),\n target=jnp.ones(50),\n media_prior=jnp.ones(5) * 50,\n number_warmup=2,\n number_samples=2,\n number_chains=1)\n cls.geo_mmm = lightweight_mmm.LightweightMMM()\n cls.geo_mmm.fit(\n media=jnp.ones((50, 5, 3)),\n target=jnp.ones((50, 3)),\n media_prior=jnp.ones(5) * 50,\n number_warmup=2,\n number_samples=2,\n number_chains=1)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for optimize_media.\"\"\"\nfrom unittest import mock\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\n\nfrom lightweight_mmm import lightweight_mmm\nfrom lightweight_mmm import optimize_media\nfrom lightweight_mmm import preprocessing\n\n\nclass OptimizeMediaTest(parameterized.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(OptimizeMediaTest, cls).setUpClass()\n cls.national_mmm = lightweight_mmm.LightweightMMM()\n cls.national_mmm.fit(\n media=jnp.ones((50, 5)),\n target=jnp.ones(50),\n media_prior=jnp.ones(5) * 50,\n number_warmup=2,\n number_samples=2,\n number_chains=1)\n cls.geo_mmm = lightweight_mmm.LightweightMMM()\n cls.geo_mmm.fit(\n media=jnp.ones((50, 5, 3)),\n target=jnp.ones((50, 3)),\n media_prior=jnp.ones(5) * 50,\n number_warmup=2,\n number_samples=2,\n number_chains=1)\n\n def setUp(self):", "metadata": {"task_id": "google--lightweight_mmm/52", "ground_truth": " super().setUp()\n self.mock_minimize = self.enter_context(\n mock.patch.object(optimize_media.optimize, \"minimize\", autospec=True))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "optimize_media_test.py"], "context_start_lineno": 0, "lineno": 51, "function_name": "setUp"}, 
"groundtruth": " super().setUp()\n self.mock_minimize = self.enter_context(\n mock.patch.object(optimize_media.optimize, \"minimize\", autospec=True))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Media transformations for accounting for lagging or media effects.\"\"\"\n\nimport functools\nfrom typing import Union\n\nimport jax\nimport jax.numpy as jnp\n\n\n@functools.partial(jax.jit, static_argnums=[0, 1])\ndef calculate_seasonality(\n number_periods: int,\n degrees: int,\n gamma_seasonality: Union[int, float, jnp.ndarray],\n frequency: int = 52,\n) -> jnp.ndarray:\n \"\"\"Calculates cyclic variation seasonality using Fourier terms.\n\n For detailed info check:\n https://en.wikipedia.org/wiki/Seasonality#Modeling\n\n Args:\n number_periods: Number of seasonal periods in the data. Eg. for 1 year of\n seasonal data it will be 52, for 3 years of the same kind 156.\n degrees: Number of degrees to use. Must be greater or equal than 1.\n gamma_seasonality: Factor to multiply to each degree calculation. Shape must\n be aligned with the number of degrees.\n frequency: Frequency of the seasonality being computed. By default is 52 for\n weekly data (52 weeks in a year).\n\n Returns:\n An array with the seasonality values.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/53", "ground_truth": " seasonality_range = jnp.expand_dims(a=jnp.arange(number_periods), axis=-1)\n degrees_range = jnp.arange(1, degrees+1)\n inner_value = seasonality_range * 2 * jnp.pi * degrees_range / frequency\n season_matrix_sin = jnp.sin(inner_value)\n season_matrix_cos = jnp.cos(inner_value)\n season_matrix = jnp.concatenate([\n jnp.expand_dims(a=season_matrix_sin, axis=-1),\n jnp.expand_dims(a=season_matrix_cos, axis=-1)\n ],\n axis=-1)\n return (season_matrix * gamma_seasonality).sum(axis=2).sum(axis=1)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "media_transforms.py"], "context_start_lineno": 0, "lineno": 48, "function_name": "calculate_seasonality"}, "groundtruth": " seasonality_range = jnp.expand_dims(a=jnp.arange(number_periods), axis=-1)\n degrees_range = jnp.arange(1, degrees+1)\n inner_value = seasonality_range * 2 * jnp.pi * degrees_range / frequency\n season_matrix_sin = jnp.sin(inner_value)\n season_matrix_cos = jnp.cos(inner_value)\n season_matrix = jnp.concatenate([\n jnp.expand_dims(a=season_matrix_sin, axis=-1),\n jnp.expand_dims(a=season_matrix_cos, axis=-1)\n ],\n axis=-1)\n return (season_matrix * gamma_seasonality).sum(axis=2).sum(axis=1)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express 
or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Media transformations for accounting for lagging or media effects.\"\"\"\n\nimport functools\nfrom typing import Union\n\nimport jax\nimport jax.numpy as jnp\n\n\n@functools.partial(jax.jit, static_argnums=[0, 1])\ndef calculate_seasonality(\n number_periods: int,\n degrees: int,\n gamma_seasonality: Union[int, float, jnp.ndarray],\n frequency: int = 52,\n) -> jnp.ndarray:\n \"\"\"Calculates cyclic variation seasonality using Fourier terms.\n\n For detailed info check:\n https://en.wikipedia.org/wiki/Seasonality#Modeling\n\n Args:\n number_periods: Number of seasonal periods in the data. Eg. for 1 year of\n seasonal data it will be 52, for 3 years of the same kind 156.\n degrees: Number of degrees to use. Must be greater or equal than 1.\n gamma_seasonality: Factor to multiply to each degree calculation. Shape must\n be aligned with the number of degrees.\n frequency: Frequency of the seasonality being computed. By default is 52 for\n weekly data (52 weeks in a year).\n\n Returns:\n An array with the seasonality values.\n \"\"\"\n\n seasonality_range = jnp.expand_dims(a=jnp.arange(number_periods), axis=-1)\n degrees_range = jnp.arange(1, degrees+1)\n inner_value = seasonality_range * 2 * jnp.pi * degrees_range / frequency\n season_matrix_sin = jnp.sin(inner_value)\n season_matrix_cos = jnp.cos(inner_value)\n season_matrix = jnp.concatenate([\n jnp.expand_dims(a=season_matrix_sin, axis=-1),\n jnp.expand_dims(a=season_matrix_cos, axis=-1)\n ],\n axis=-1)\n return (season_matrix * gamma_seasonality).sum(axis=2).sum(axis=1)\n\n\n@jax.jit\ndef adstock(data: jnp.ndarray,\n lag_weight: float = .9,\n normalise: bool = True) -> jnp.ndarray:\n \"\"\"Calculates the adstock value of a given array.\n\n To learn more about advertising lag:\n https://en.wikipedia.org/wiki/Advertising_adstock\n\n Args:\n data: Input array.\n lag_weight: lag_weight effect of the adstock function. Default is 0.9.\n normalise: Whether to normalise the output value. This normalization will\n divide the output values by (1 / (1 - lag_weight)).\n\n Returns:\n The adstock output of the input array.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/54", "ground_truth": " def adstock_internal(prev_adstock: jnp.ndarray,\n data: jnp.ndarray,\n lag_weight: float = lag_weight) -> jnp.ndarray:\n adstock_value = prev_adstock * lag_weight + data\n return adstock_value, adstock_value# jax-ndarray\n\n _, adstock_values = jax.lax.scan(\n f=adstock_internal, init=data[0, ...], xs=data[1:, ...])\n adstock_values = jnp.concatenate([jnp.array([data[0, ...]]), adstock_values])\n return jax.lax.cond(\n normalise,\n lambda adstock_values: adstock_values / (1. / (1 - lag_weight)),\n lambda adstock_values: adstock_values,\n operand=adstock_values)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "media_transforms.py"], "context_start_lineno": 0, "lineno": 80, "function_name": "adstock"}, "groundtruth": " def adstock_internal(prev_adstock: jnp.ndarray,\n data: jnp.ndarray,\n lag_weight: float = lag_weight) -> jnp.ndarray:\n adstock_value = prev_adstock * lag_weight + data\n return adstock_value, adstock_value# jax-ndarray\n\n _, adstock_values = jax.lax.scan(\n f=adstock_internal, init=data[0, ...], xs=data[1:, ...])\n adstock_values = jnp.concatenate([jnp.array([data[0, ...]]), adstock_values])\n return jax.lax.cond(\n normalise,\n lambda adstock_values: adstock_values / (1. 
/ (1 - lag_weight)),\n lambda adstock_values: adstock_values,\n operand=adstock_values)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Media transformations for accounting for lagging or media effects.\"\"\"\n\nimport functools\nfrom typing import Union\n\nimport jax\nimport jax.numpy as jnp\n\n\n@functools.partial(jax.jit, static_argnums=[0, 1])\ndef calculate_seasonality(\n number_periods: int,\n degrees: int,\n gamma_seasonality: Union[int, float, jnp.ndarray],\n frequency: int = 52,\n) -> jnp.ndarray:\n \"\"\"Calculates cyclic variation seasonality using Fourier terms.\n\n For detailed info check:\n https://en.wikipedia.org/wiki/Seasonality#Modeling\n\n Args:\n number_periods: Number of seasonal periods in the data. Eg. for 1 year of\n seasonal data it will be 52, for 3 years of the same kind 156.\n degrees: Number of degrees to use. Must be greater or equal than 1.\n gamma_seasonality: Factor to multiply to each degree calculation. Shape must\n be aligned with the number of degrees.\n frequency: Frequency of the seasonality being computed. By default is 52 for\n weekly data (52 weeks in a year).\n\n Returns:\n An array with the seasonality values.\n \"\"\"\n\n seasonality_range = jnp.expand_dims(a=jnp.arange(number_periods), axis=-1)\n degrees_range = jnp.arange(1, degrees+1)\n inner_value = seasonality_range * 2 * jnp.pi * degrees_range / frequency\n season_matrix_sin = jnp.sin(inner_value)\n season_matrix_cos = jnp.cos(inner_value)\n season_matrix = jnp.concatenate([\n jnp.expand_dims(a=season_matrix_sin, axis=-1),\n jnp.expand_dims(a=season_matrix_cos, axis=-1)\n ],\n axis=-1)\n return (season_matrix * gamma_seasonality).sum(axis=2).sum(axis=1)\n\n\n@jax.jit\ndef adstock(data: jnp.ndarray,\n lag_weight: float = .9,\n normalise: bool = True) -> jnp.ndarray:\n \"\"\"Calculates the adstock value of a given array.\n\n To learn more about advertising lag:\n https://en.wikipedia.org/wiki/Advertising_adstock\n\n Args:\n data: Input array.\n lag_weight: lag_weight effect of the adstock function. Default is 0.9.\n normalise: Whether to normalise the output value. This normalization will\n divide the output values by (1 / (1 - lag_weight)).\n\n Returns:\n The adstock output of the input array.\n \"\"\"\n\n def adstock_internal(prev_adstock: jnp.ndarray,\n data: jnp.ndarray,\n lag_weight: float = lag_weight) -> jnp.ndarray:\n adstock_value = prev_adstock * lag_weight + data\n return adstock_value, adstock_value# jax-ndarray\n\n _, adstock_values = jax.lax.scan(\n f=adstock_internal, init=data[0, ...], xs=data[1:, ...])\n adstock_values = jnp.concatenate([jnp.array([data[0, ...]]), adstock_values])\n return jax.lax.cond(\n normalise,\n lambda adstock_values: adstock_values / (1. 
/ (1 - lag_weight)),\n lambda adstock_values: adstock_values,\n operand=adstock_values)\n\n\n@jax.jit\ndef hill(data: jnp.ndarray, half_max_effective_concentration: jnp.ndarray,\n slope: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Calculates the hill function for a given array of values.\n\n Refer to the following link for detailed information on this equation:\n https://en.wikipedia.org/wiki/Hill_equation_(biochemistry)\n\n Args:\n data: Input data.\n half_max_effective_concentration: ec50 value for the hill function.\n slope: Slope of the hill function.\n\n Returns:\n The hill values for the respective input data.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/55", "ground_truth": " save_transform = apply_exponent_safe(\n data=data / half_max_effective_concentration, exponent=-slope)\n return jnp.where(save_transform == 0, x=0, y=1. / (1 + save_transform))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "media_transforms.py"], "context_start_lineno": 0, "lineno": 112, "function_name": "hill"}, "groundtruth": " save_transform = apply_exponent_safe(\n data=data / half_max_effective_concentration, exponent=-slope)\n return jnp.where(save_transform == 0, x=0, y=1. / (1 + save_transform))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Media transformations for accounting for lagging or media effects.\"\"\"\n\nimport functools\nfrom typing import Union\n\nimport jax\nimport jax.numpy as jnp\n\n\n@functools.partial(jax.jit, static_argnums=[0, 1])\ndef calculate_seasonality(\n number_periods: int,\n degrees: int,\n gamma_seasonality: Union[int, float, jnp.ndarray],\n frequency: int = 52,\n) -> jnp.ndarray:\n \"\"\"Calculates cyclic variation seasonality using Fourier terms.\n\n For detailed info check:\n https://en.wikipedia.org/wiki/Seasonality#Modeling\n\n Args:\n number_periods: Number of seasonal periods in the data. Eg. for 1 year of\n seasonal data it will be 52, for 3 years of the same kind 156.\n degrees: Number of degrees to use. Must be greater or equal than 1.\n gamma_seasonality: Factor to multiply to each degree calculation. Shape must\n be aligned with the number of degrees.\n frequency: Frequency of the seasonality being computed. 
By default is 52 for\n weekly data (52 weeks in a year).\n\n Returns:\n An array with the seasonality values.\n \"\"\"\n\n seasonality_range = jnp.expand_dims(a=jnp.arange(number_periods), axis=-1)\n degrees_range = jnp.arange(1, degrees+1)\n inner_value = seasonality_range * 2 * jnp.pi * degrees_range / frequency\n season_matrix_sin = jnp.sin(inner_value)\n season_matrix_cos = jnp.cos(inner_value)\n season_matrix = jnp.concatenate([\n jnp.expand_dims(a=season_matrix_sin, axis=-1),\n jnp.expand_dims(a=season_matrix_cos, axis=-1)\n ],\n axis=-1)\n return (season_matrix * gamma_seasonality).sum(axis=2).sum(axis=1)\n\n\n@jax.jit\ndef adstock(data: jnp.ndarray,\n lag_weight: float = .9,\n normalise: bool = True) -> jnp.ndarray:\n \"\"\"Calculates the adstock value of a given array.\n\n To learn more about advertising lag:\n https://en.wikipedia.org/wiki/Advertising_adstock\n\n Args:\n data: Input array.\n lag_weight: lag_weight effect of the adstock function. Default is 0.9.\n normalise: Whether to normalise the output value. This normalization will\n divide the output values by (1 / (1 - lag_weight)).\n\n Returns:\n The adstock output of the input array.\n \"\"\"\n\n def adstock_internal(prev_adstock: jnp.ndarray,\n data: jnp.ndarray,\n lag_weight: float = lag_weight) -> jnp.ndarray:\n adstock_value = prev_adstock * lag_weight + data\n return adstock_value, adstock_value# jax-ndarray\n\n _, adstock_values = jax.lax.scan(\n f=adstock_internal, init=data[0, ...], xs=data[1:, ...])\n adstock_values = jnp.concatenate([jnp.array([data[0, ...]]), adstock_values])\n return jax.lax.cond(\n normalise,\n lambda adstock_values: adstock_values / (1. / (1 - lag_weight)),\n lambda adstock_values: adstock_values,\n operand=adstock_values)\n\n\n@jax.jit\ndef hill(data: jnp.ndarray, half_max_effective_concentration: jnp.ndarray,\n slope: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Calculates the hill function for a given array of values.\n\n Refer to the following link for detailed information on this equation:\n https://en.wikipedia.org/wiki/Hill_equation_(biochemistry)\n\n Args:\n data: Input data.\n half_max_effective_concentration: ec50 value for the hill function.\n slope: Slope of the hill function.\n\n Returns:\n The hill values for the respective input data.\n \"\"\"\n save_transform = apply_exponent_safe(\n data=data / half_max_effective_concentration, exponent=-slope)\n return jnp.where(save_transform == 0, x=0, y=1. / (1 + save_transform))\n\n\n@functools.partial(jax.vmap, in_axes=(1, 1, None), out_axes=1)\ndef _carryover_convolve(data: jnp.ndarray,\n weights: jnp.ndarray,\n number_lags: int) -> jnp.ndarray:\n \"\"\"Applies the convolution between the data and the weights for the carryover.\n\n Args:\n data: Input data.\n weights: Window weights for the carryover.\n number_lags: Number of lags the window has.\n\n Returns:\n The result values from convolving the data and the weights with padding.\n \"\"\"\n window = jnp.concatenate([jnp.zeros(number_lags - 1), weights])\n return jax.scipy.signal.convolve(data, window, mode=\"same\") / weights.sum()\n\n\n@functools.partial(jax.jit, static_argnames=(\"number_lags\",))\ndef carryover(data: jnp.ndarray,\n ad_effect_retention_rate: jnp.ndarray,\n peak_effect_delay: jnp.ndarray,\n number_lags: int = 13) -> jnp.ndarray:\n \"\"\"Calculates media carryover.\n\n More details about this function can be found in:\n https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/46001.pdf\n\n Args:\n data: Input data. 
It is expected that data has either 2 dimensions for\n national models and 3 for geo models.\n ad_effect_retention_rate: Retention rate of the advertisement effect.\n Default is 0.5.\n peak_effect_delay: Delay of the peak effect in the carryover function.\n Default is 1.\n number_lags: Number of lags to include in the carryover calculation. Default\n is 13.\n\n Returns:\n The carryover values for the given data with the given parameters.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/56", "ground_truth": " lags_arange = jnp.expand_dims(jnp.arange(number_lags, dtype=jnp.float32),\n axis=-1)\n convolve_func = _carryover_convolve\n if data.ndim == 3:\n # Since _carryover_convolve is already vmaped in the decorator we only need\n # to vmap it once here to handle the geo level data. We keep the windows bi\n # dimensional also for three dims data and vmap over only the extra data\n # dimension.\n convolve_func = jax.vmap(\n fun=_carryover_convolve, in_axes=(2, None, None), out_axes=2)\n weights = ad_effect_retention_rate**((lags_arange - peak_effect_delay)**2)\n return convolve_func(data, weights, number_lags)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "media_transforms.py"], "context_start_lineno": 0, "lineno": 158, "function_name": "carryover"}, "groundtruth": " lags_arange = jnp.expand_dims(jnp.arange(number_lags, dtype=jnp.float32),\n axis=-1)\n convolve_func = _carryover_convolve\n if data.ndim == 3:\n # Since _carryover_convolve is already vmaped in the decorator we only need\n # to vmap it once here to handle the geo level data. We keep the windows bi\n # dimensional also for three dims data and vmap over only the extra data\n # dimension.\n convolve_func = jax.vmap(\n fun=_carryover_convolve, in_axes=(2, None, None), out_axes=2)\n weights = ad_effect_retention_rate**((lags_arange - peak_effect_delay)**2)\n return convolve_func(data, weights, number_lags)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for lightweight_mmm.\"\"\"\n\nimport copy\n\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\nimport jax.numpy as jnp\nimport numpy as np\nimport numpyro.distributions as dist\n\nfrom lightweight_mmm import lightweight_mmm\nfrom lightweight_mmm import models\n\n\nclass LightweightMmmTest(parameterized.TestCase):\n\n @classmethod\n def setUpClass(cls):", "metadata": {"task_id": "google--lightweight_mmm/57", "ground_truth": " super(LightweightMmmTest, cls).setUpClass()\n cls.national_mmm = lightweight_mmm.LightweightMMM()\n cls.national_mmm.fit(\n media=jnp.ones((50, 5)),\n target=jnp.ones(50),\n media_prior=jnp.ones(5) * 50,\n extra_features=jnp.ones((50, 2)),\n number_warmup=2,\n number_samples=4,\n number_chains=1)\n cls.geo_mmm = lightweight_mmm.LightweightMMM()\n cls.geo_mmm.fit(\n media=jnp.ones((50, 5, 3)),\n target=jnp.ones((50, 3)),\n media_prior=jnp.ones(5) * 50,\n extra_features=jnp.ones((50, 2, 3)),\n number_warmup=2,\n number_samples=4,\n number_chains=1)\n", 
"fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "lightweight_mmm_test.py"], "context_start_lineno": 0, "lineno": 32, "function_name": "setUpClass"}, "groundtruth": " super(LightweightMmmTest, cls).setUpClass()\n cls.national_mmm = lightweight_mmm.LightweightMMM()\n cls.national_mmm.fit(\n media=jnp.ones((50, 5)),\n target=jnp.ones(50),\n media_prior=jnp.ones(5) * 50,\n extra_features=jnp.ones((50, 2)),\n number_warmup=2,\n number_samples=4,\n number_chains=1)\n cls.geo_mmm = lightweight_mmm.LightweightMMM()\n cls.geo_mmm.fit(\n media=jnp.ones((50, 5, 3)),\n target=jnp.ones((50, 3)),\n media_prior=jnp.ones(5) * 50,\n extra_features=jnp.ones((50, 2, 3)),\n number_warmup=2,\n number_samples=4,\n number_chains=1)\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A simple and lightweight library for Media Mix Modelling.\n\nSimple usage of this class goes as following:\n\n```\nmmm = lightweight_mmm.LightweightMMM()\nmmm.fit(media=media_data,\n extra_features=extra_features,\n media_prior=costs,\n target=target,\n number_samples=1000,\n number_chains=2)\n\n# For obtaining media contribution percentage and ROI\npredictions, media_contribution_hat_pct, roi_hat = mmm.get_posterior_metrics()\n\n# For running predictions on unseen data\nmmm.predict(media=media_data_test, extra_features=extra_features_test)\n```\n\"\"\"\n\nimport collections\nimport dataclasses\nimport functools\nimport itertools\nimport logging\nimport numbers\nfrom typing import Any, Callable, Dict, Mapping, MutableMapping, Optional, Sequence, Tuple, Union\n\nfrom absl import logging\nimport immutabledict\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport numpyro\nfrom numpyro import distributions as dist\nfrom numpyro import infer\n\nfrom lightweight_mmm import models\nfrom lightweight_mmm import preprocessing\nfrom lightweight_mmm import utils\n\nPrior = Union[\n dist.Distribution,\n Dict[str, float],\n Sequence[float],\n float\n]\n\n_NAMES_TO_MODEL_TRANSFORMS = immutabledict.immutabledict({\n \"hill_adstock\": models.transform_hill_adstock,\n \"adstock\": models.transform_adstock,\n \"carryover\": models.transform_carryover\n})\n_MODEL_FUNCTION = models.media_mix_model\n\n\ndef _compare_equality_for_lmmm(item_1: Any, item_2: Any) -> bool:\n \"\"\"Compares two items for equality.\n\n Helper function for the __eq__ method of LightweightmMM. First checks if items\n are strings or lists of strings (it's okay if empty lists compare True), then\n uses jnp.array_equal if the items are jax.numpy.DeviceArray or other related\n sequences, and uses items' __eq__ otherwise.\n\n Note: this implementation does not cover every possible data structure, but\n it does cover all the data structures seen in attributes used by\n LightweightMMM. 
Sometimes the DeviceArray is hidden in the value of a\n MutableMapping, hence the recursion.\n\n Args:\n item_1: First item to be compared.\n item_2: Second item to be compared.\n\n Returns:\n Boolean for whether item_1 equals item_2.\n \"\"\"\n\n # This is pretty strict but LMMM classes don't need to compare equal unless\n # they are exact copies.", "metadata": {"task_id": "google--lightweight_mmm/58", "ground_truth": " if type(item_1) != type(item_2):\n is_equal = False\n elif isinstance(item_1, str):\n is_equal = item_1 == item_2\n elif isinstance(item_1, (jax.Array, np.ndarray, Sequence)):\n if all(isinstance(x, str) for x in item_1) and all(\n isinstance(x, str) for x in item_2):\n is_equal = item_1 == item_2\n else:\n is_equal = np.array_equal(item_1, item_2, equal_nan=True)\n elif isinstance(item_1, MutableMapping):\n is_equal = all([\n _compare_equality_for_lmmm(item_1[x], item_2[x])\n for x in item_1.keys() | item_2.keys()\n ])\n else:\n is_equal = item_1 == item_2\n\n return is_equal\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "lightweight_mmm.py"], "context_start_lineno": 0, "lineno": 94, "function_name": "_compare_equality_for_lmmm"}, "groundtruth": " if type(item_1) != type(item_2):\n is_equal = False\n elif isinstance(item_1, str):\n is_equal = item_1 == item_2\n elif isinstance(item_1, (jax.Array, np.ndarray, Sequence)):\n if all(isinstance(x, str) for x in item_1) and all(\n isinstance(x, str) for x in item_2):\n is_equal = item_1 == item_2\n else:\n is_equal = np.array_equal(item_1, item_2, equal_nan=True)\n elif isinstance(item_1, MutableMapping):\n is_equal = all([\n _compare_equality_for_lmmm(item_1[x], item_2[x])\n for x in item_1.keys() | item_2.keys()\n ])\n else:\n is_equal = item_1 == item_2\n\n return is_equal\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A simple and lightweight library for Media Mix Modelling.\n\nSimple usage of this class goes as following:\n\n```\nmmm = lightweight_mmm.LightweightMMM()\nmmm.fit(media=media_data,\n extra_features=extra_features,\n media_prior=costs,\n target=target,\n number_samples=1000,\n number_chains=2)\n\n# For obtaining media contribution percentage and ROI\npredictions, media_contribution_hat_pct, roi_hat = mmm.get_posterior_metrics()\n\n# For running predictions on unseen data\nmmm.predict(media=media_data_test, extra_features=extra_features_test)\n```\n\"\"\"\n\nimport collections\nimport dataclasses\nimport functools\nimport itertools\nimport logging\nimport numbers\nfrom typing import Any, Callable, Dict, Mapping, MutableMapping, Optional, Sequence, Tuple, Union\n\nfrom absl import logging\nimport immutabledict\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport numpyro\nfrom numpyro import distributions as dist\nfrom numpyro import infer\n\nfrom lightweight_mmm import models\nfrom lightweight_mmm import preprocessing\nfrom lightweight_mmm import utils\n\nPrior = Union[\n dist.Distribution,\n Dict[str, float],\n 
Sequence[float],\n float\n]\n\n_NAMES_TO_MODEL_TRANSFORMS = immutabledict.immutabledict({\n \"hill_adstock\": models.transform_hill_adstock,\n \"adstock\": models.transform_adstock,\n \"carryover\": models.transform_carryover\n})\n_MODEL_FUNCTION = models.media_mix_model\n\n\ndef _compare_equality_for_lmmm(item_1: Any, item_2: Any) -> bool:\n \"\"\"Compares two items for equality.\n\n Helper function for the __eq__ method of LightweightmMM. First checks if items\n are strings or lists of strings (it's okay if empty lists compare True), then\n uses jnp.array_equal if the items are jax.numpy.DeviceArray or other related\n sequences, and uses items' __eq__ otherwise.\n\n Note: this implementation does not cover every possible data structure, but\n it does cover all the data structures seen in attributes used by\n LightweightMMM. Sometimes the DeviceArray is hidden in the value of a\n MutableMapping, hence the recursion.\n\n Args:\n item_1: First item to be compared.\n item_2: Second item to be compared.\n\n Returns:\n Boolean for whether item_1 equals item_2.\n \"\"\"\n\n # This is pretty strict but LMMM classes don't need to compare equal unless\n # they are exact copies.\n if type(item_1) != type(item_2):\n is_equal = False\n elif isinstance(item_1, str):\n is_equal = item_1 == item_2\n elif isinstance(item_1, (jax.Array, np.ndarray, Sequence)):\n if all(isinstance(x, str) for x in item_1) and all(\n isinstance(x, str) for x in item_2):\n is_equal = item_1 == item_2\n else:\n is_equal = np.array_equal(item_1, item_2, equal_nan=True)\n elif isinstance(item_1, MutableMapping):\n is_equal = all([\n _compare_equality_for_lmmm(item_1[x], item_2[x])\n for x in item_1.keys() | item_2.keys()\n ])\n else:\n is_equal = item_1 == item_2\n\n return is_equal\n\n\nclass NotFittedModelError(Exception):\n pass\n\n\n@dataclasses.dataclass(unsafe_hash=True, eq=False)\nclass LightweightMMM:\n \"\"\"Lightweight Media Mix Modelling wrapper for bayesian models.\n\n The currently available models are the following:\n - hill_adstock\n - adstock\n - carryover\n\n It also offers the necessary utilities for calculating media contribution and\n media ROI based on models' results.\n\n Attributes:\n trace: Sampling trace of the bayesian model once fitted.\n n_media_channels: Number of media channels the model was trained with.\n n_geos: Number of geos for geo models or 1 for national models.\n model_name: Name of the model.\n media: The media data the model is trained on. Usefull for a variety of\n insights post model fitting.\n media_names: Names of the media channels passed at fitting time.\n custom_priors: The set of custom priors the model was trained with. 
An empty\n dictionary if none were passed.\n \"\"\"\n model_name: str = \"hill_adstock\"\n n_media_channels: int = dataclasses.field(init=False, repr=False)\n n_geos: int = dataclasses.field(init=False, repr=False)\n media: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n media_names: Sequence[str] = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n trace: Dict[str, jnp.DeviceArray] = dataclasses.field(\n init=False, repr=False, hash=False, compare=False)\n custom_priors: MutableMapping[str, Prior] = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _degrees_seasonality: int = dataclasses.field(init=False, repr=False)\n _weekday_seasonality: bool = dataclasses.field(init=False, repr=False)\n _media_prior: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _extra_features: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _target: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _train_media_size: int = dataclasses.field(\n init=False, repr=False, hash=True, compare=False)\n _mcmc: numpyro.infer.MCMC = dataclasses.field(\n init=False, repr=False, hash=False, compare=False)\n\n def __post_init__(self):", "metadata": {"task_id": "google--lightweight_mmm/59", "ground_truth": " if self.model_name not in _NAMES_TO_MODEL_TRANSFORMS:\n raise ValueError(\"Model name passed not valid. Please use any of the\"\n \"following: 'hill_adstock', 'adstock', 'carryover'.\")\n self._model_function = _MODEL_FUNCTION\n self._model_transform_function = _NAMES_TO_MODEL_TRANSFORMS[self.model_name]\n self._prior_names = models.MODEL_PRIORS_NAMES.union(\n models.TRANSFORM_PRIORS_NAMES[self.model_name])\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "lightweight_mmm.py"], "context_start_lineno": 0, "lineno": 167, "function_name": "__post_init__"}, "groundtruth": " if self.model_name not in _NAMES_TO_MODEL_TRANSFORMS:\n raise ValueError(\"Model name passed not valid. 
Please use any of the\"\n \"following: 'hill_adstock', 'adstock', 'carryover'.\")\n self._model_function = _MODEL_FUNCTION\n self._model_transform_function = _NAMES_TO_MODEL_TRANSFORMS[self.model_name]\n self._prior_names = models.MODEL_PRIORS_NAMES.union(\n models.TRANSFORM_PRIORS_NAMES[self.model_name])\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A simple and lightweight library for Media Mix Modelling.\n\nSimple usage of this class goes as following:\n\n```\nmmm = lightweight_mmm.LightweightMMM()\nmmm.fit(media=media_data,\n extra_features=extra_features,\n media_prior=costs,\n target=target,\n number_samples=1000,\n number_chains=2)\n\n# For obtaining media contribution percentage and ROI\npredictions, media_contribution_hat_pct, roi_hat = mmm.get_posterior_metrics()\n\n# For running predictions on unseen data\nmmm.predict(media=media_data_test, extra_features=extra_features_test)\n```\n\"\"\"\n\nimport collections\nimport dataclasses\nimport functools\nimport itertools\nimport logging\nimport numbers\nfrom typing import Any, Callable, Dict, Mapping, MutableMapping, Optional, Sequence, Tuple, Union\n\nfrom absl import logging\nimport immutabledict\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport numpyro\nfrom numpyro import distributions as dist\nfrom numpyro import infer\n\nfrom lightweight_mmm import models\nfrom lightweight_mmm import preprocessing\nfrom lightweight_mmm import utils\n\nPrior = Union[\n dist.Distribution,\n Dict[str, float],\n Sequence[float],\n float\n]\n\n_NAMES_TO_MODEL_TRANSFORMS = immutabledict.immutabledict({\n \"hill_adstock\": models.transform_hill_adstock,\n \"adstock\": models.transform_adstock,\n \"carryover\": models.transform_carryover\n})\n_MODEL_FUNCTION = models.media_mix_model\n\n\ndef _compare_equality_for_lmmm(item_1: Any, item_2: Any) -> bool:\n \"\"\"Compares two items for equality.\n\n Helper function for the __eq__ method of LightweightmMM. First checks if items\n are strings or lists of strings (it's okay if empty lists compare True), then\n uses jnp.array_equal if the items are jax.numpy.DeviceArray or other related\n sequences, and uses items' __eq__ otherwise.\n\n Note: this implementation does not cover every possible data structure, but\n it does cover all the data structures seen in attributes used by\n LightweightMMM. 
Sometimes the DeviceArray is hidden in the value of a\n MutableMapping, hence the recursion.\n\n Args:\n item_1: First item to be compared.\n item_2: Second item to be compared.\n\n Returns:\n Boolean for whether item_1 equals item_2.\n \"\"\"\n\n # This is pretty strict but LMMM classes don't need to compare equal unless\n # they are exact copies.\n if type(item_1) != type(item_2):\n is_equal = False\n elif isinstance(item_1, str):\n is_equal = item_1 == item_2\n elif isinstance(item_1, (jax.Array, np.ndarray, Sequence)):\n if all(isinstance(x, str) for x in item_1) and all(\n isinstance(x, str) for x in item_2):\n is_equal = item_1 == item_2\n else:\n is_equal = np.array_equal(item_1, item_2, equal_nan=True)\n elif isinstance(item_1, MutableMapping):\n is_equal = all([\n _compare_equality_for_lmmm(item_1[x], item_2[x])\n for x in item_1.keys() | item_2.keys()\n ])\n else:\n is_equal = item_1 == item_2\n\n return is_equal\n\n\nclass NotFittedModelError(Exception):\n pass\n\n\n@dataclasses.dataclass(unsafe_hash=True, eq=False)\nclass LightweightMMM:\n \"\"\"Lightweight Media Mix Modelling wrapper for bayesian models.\n\n The currently available models are the following:\n - hill_adstock\n - adstock\n - carryover\n\n It also offers the necessary utilities for calculating media contribution and\n media ROI based on models' results.\n\n Attributes:\n trace: Sampling trace of the bayesian model once fitted.\n n_media_channels: Number of media channels the model was trained with.\n n_geos: Number of geos for geo models or 1 for national models.\n model_name: Name of the model.\n media: The media data the model is trained on. Usefull for a variety of\n insights post model fitting.\n media_names: Names of the media channels passed at fitting time.\n custom_priors: The set of custom priors the model was trained with. An empty\n dictionary if none were passed.\n \"\"\"\n model_name: str = \"hill_adstock\"\n n_media_channels: int = dataclasses.field(init=False, repr=False)\n n_geos: int = dataclasses.field(init=False, repr=False)\n media: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n media_names: Sequence[str] = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n trace: Dict[str, jnp.DeviceArray] = dataclasses.field(\n init=False, repr=False, hash=False, compare=False)\n custom_priors: MutableMapping[str, Prior] = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _degrees_seasonality: int = dataclasses.field(init=False, repr=False)\n _weekday_seasonality: bool = dataclasses.field(init=False, repr=False)\n _media_prior: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _extra_features: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _target: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _train_media_size: int = dataclasses.field(\n init=False, repr=False, hash=True, compare=False)\n _mcmc: numpyro.infer.MCMC = dataclasses.field(\n init=False, repr=False, hash=False, compare=False)\n\n def __post_init__(self):\n if self.model_name not in _NAMES_TO_MODEL_TRANSFORMS:\n raise ValueError(\"Model name passed not valid. 
Please use any of the\"\n \"following: 'hill_adstock', 'adstock', 'carryover'.\")\n self._model_function = _MODEL_FUNCTION\n self._model_transform_function = _NAMES_TO_MODEL_TRANSFORMS[self.model_name]\n self._prior_names = models.MODEL_PRIORS_NAMES.union(\n models.TRANSFORM_PRIORS_NAMES[self.model_name])\n\n def __eq__(self, other: Any) -> bool:\n \"\"\"Equality method for LightweightMMMM.\n\n We need a special method here to handle a couple of issues. First, some of\n the attributes for LightweightMMM are arrays, which contain multiple values\n and cannot be evaluated with the default __eq__ method. Second, some\n attributes are initially undefined and only get values after fitting a\n model. The latter is dealt with within this function, and the former within\n the helper function _compare_equality_for_lmmm().\n\n Args:\n other: Dataclass to compare against.\n\n Returns:\n Boolean for whether self == other; NotImplemented if other is not a\n LightweightMMM.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/60", "ground_truth": " if not isinstance(other, LightweightMMM):\n return NotImplemented\n\n def _create_list_of_attributes_to_compare(\n mmm_instance: Any) -> Sequence[str]:\n all_attributes_that_can_be_compared = sorted(\n [x.name for x in dataclasses.fields(mmm_instance) if x.compare])\n attributes_which_have_been_instantiated = [\n x for x in all_attributes_that_can_be_compared\n if hasattr(mmm_instance, x)\n ]\n return attributes_which_have_been_instantiated\n\n self_attributes = _create_list_of_attributes_to_compare(self)\n other_attributes = _create_list_of_attributes_to_compare(other)\n\n return all(\n _compare_equality_for_lmmm(getattr(self, a1), getattr(other, a2))\n for a1, a2 in itertools.zip_longest(self_attributes, other_attributes))\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "lightweight_mmm.py"], "context_start_lineno": 0, "lineno": 192, "function_name": "__eq__"}, "groundtruth": " if not isinstance(other, LightweightMMM):\n return NotImplemented\n\n def _create_list_of_attributes_to_compare(\n mmm_instance: Any) -> Sequence[str]:\n all_attributes_that_can_be_compared = sorted(\n [x.name for x in dataclasses.fields(mmm_instance) if x.compare])\n attributes_which_have_been_instantiated = [\n x for x in all_attributes_that_can_be_compared\n if hasattr(mmm_instance, x)\n ]\n return attributes_which_have_been_instantiated\n\n self_attributes = _create_list_of_attributes_to_compare(self)\n other_attributes = _create_list_of_attributes_to_compare(other)\n\n return all(\n _compare_equality_for_lmmm(getattr(self, a1), getattr(other, a2))\n for a1, a2 in itertools.zip_longest(self_attributes, other_attributes))\n"} +{"prompt": "# Copyright 2023 Google LLC.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A simple and lightweight library for Media Mix Modelling.\n\nSimple usage of this class goes as following:\n\n```\nmmm = lightweight_mmm.LightweightMMM()\nmmm.fit(media=media_data,\n extra_features=extra_features,\n media_prior=costs,\n 
target=target,\n number_samples=1000,\n number_chains=2)\n\n# For obtaining media contribution percentage and ROI\npredictions, media_contribution_hat_pct, roi_hat = mmm.get_posterior_metrics()\n\n# For running predictions on unseen data\nmmm.predict(media=media_data_test, extra_features=extra_features_test)\n```\n\"\"\"\n\nimport collections\nimport dataclasses\nimport functools\nimport itertools\nimport logging\nimport numbers\nfrom typing import Any, Callable, Dict, Mapping, MutableMapping, Optional, Sequence, Tuple, Union\n\nfrom absl import logging\nimport immutabledict\nimport jax\nimport jax.numpy as jnp\nimport numpy as np\nimport numpyro\nfrom numpyro import distributions as dist\nfrom numpyro import infer\n\nfrom lightweight_mmm import models\nfrom lightweight_mmm import preprocessing\nfrom lightweight_mmm import utils\n\nPrior = Union[\n dist.Distribution,\n Dict[str, float],\n Sequence[float],\n float\n]\n\n_NAMES_TO_MODEL_TRANSFORMS = immutabledict.immutabledict({\n \"hill_adstock\": models.transform_hill_adstock,\n \"adstock\": models.transform_adstock,\n \"carryover\": models.transform_carryover\n})\n_MODEL_FUNCTION = models.media_mix_model\n\n\ndef _compare_equality_for_lmmm(item_1: Any, item_2: Any) -> bool:\n \"\"\"Compares two items for equality.\n\n Helper function for the __eq__ method of LightweightmMM. First checks if items\n are strings or lists of strings (it's okay if empty lists compare True), then\n uses jnp.array_equal if the items are jax.numpy.DeviceArray or other related\n sequences, and uses items' __eq__ otherwise.\n\n Note: this implementation does not cover every possible data structure, but\n it does cover all the data structures seen in attributes used by\n LightweightMMM. Sometimes the DeviceArray is hidden in the value of a\n MutableMapping, hence the recursion.\n\n Args:\n item_1: First item to be compared.\n item_2: Second item to be compared.\n\n Returns:\n Boolean for whether item_1 equals item_2.\n \"\"\"\n\n # This is pretty strict but LMMM classes don't need to compare equal unless\n # they are exact copies.\n if type(item_1) != type(item_2):\n is_equal = False\n elif isinstance(item_1, str):\n is_equal = item_1 == item_2\n elif isinstance(item_1, (jax.Array, np.ndarray, Sequence)):\n if all(isinstance(x, str) for x in item_1) and all(\n isinstance(x, str) for x in item_2):\n is_equal = item_1 == item_2\n else:\n is_equal = np.array_equal(item_1, item_2, equal_nan=True)\n elif isinstance(item_1, MutableMapping):\n is_equal = all([\n _compare_equality_for_lmmm(item_1[x], item_2[x])\n for x in item_1.keys() | item_2.keys()\n ])\n else:\n is_equal = item_1 == item_2\n\n return is_equal\n\n\nclass NotFittedModelError(Exception):\n pass\n\n\n@dataclasses.dataclass(unsafe_hash=True, eq=False)\nclass LightweightMMM:\n \"\"\"Lightweight Media Mix Modelling wrapper for bayesian models.\n\n The currently available models are the following:\n - hill_adstock\n - adstock\n - carryover\n\n It also offers the necessary utilities for calculating media contribution and\n media ROI based on models' results.\n\n Attributes:\n trace: Sampling trace of the bayesian model once fitted.\n n_media_channels: Number of media channels the model was trained with.\n n_geos: Number of geos for geo models or 1 for national models.\n model_name: Name of the model.\n media: The media data the model is trained on. 
Usefull for a variety of\n insights post model fitting.\n media_names: Names of the media channels passed at fitting time.\n custom_priors: The set of custom priors the model was trained with. An empty\n dictionary if none were passed.\n \"\"\"\n model_name: str = \"hill_adstock\"\n n_media_channels: int = dataclasses.field(init=False, repr=False)\n n_geos: int = dataclasses.field(init=False, repr=False)\n media: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n media_names: Sequence[str] = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n trace: Dict[str, jnp.DeviceArray] = dataclasses.field(\n init=False, repr=False, hash=False, compare=False)\n custom_priors: MutableMapping[str, Prior] = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _degrees_seasonality: int = dataclasses.field(init=False, repr=False)\n _weekday_seasonality: bool = dataclasses.field(init=False, repr=False)\n _media_prior: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _extra_features: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _target: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _train_media_size: int = dataclasses.field(\n init=False, repr=False, hash=True, compare=False)\n _mcmc: numpyro.infer.MCMC = dataclasses.field(\n init=False, repr=False, hash=False, compare=False)\n\n def __post_init__(self):\n if self.model_name not in _NAMES_TO_MODEL_TRANSFORMS:\n raise ValueError(\"Model name passed not valid. Please use any of the\"\n \"following: 'hill_adstock', 'adstock', 'carryover'.\")\n self._model_function = _MODEL_FUNCTION\n self._model_transform_function = _NAMES_TO_MODEL_TRANSFORMS[self.model_name]\n self._prior_names = models.MODEL_PRIORS_NAMES.union(\n models.TRANSFORM_PRIORS_NAMES[self.model_name])\n\n def __eq__(self, other: Any) -> bool:\n \"\"\"Equality method for LightweightMMMM.\n\n We need a special method here to handle a couple of issues. First, some of\n the attributes for LightweightMMM are arrays, which contain multiple values\n and cannot be evaluated with the default __eq__ method. Second, some\n attributes are initially undefined and only get values after fitting a\n model. 
The latter is dealt with within this function, and the former within\n the helper function _compare_equality_for_lmmm().\n\n Args:\n other: Dataclass to compare against.\n\n Returns:\n Boolean for whether self == other; NotImplemented if other is not a\n LightweightMMM.\n \"\"\"\n if not isinstance(other, LightweightMMM):\n return NotImplemented\n\n def _create_list_of_attributes_to_compare(\n mmm_instance: Any) -> Sequence[str]:", "metadata": {"task_id": "google--lightweight_mmm/61", "ground_truth": " all_attributes_that_can_be_compared = sorted(\n [x.name for x in dataclasses.fields(mmm_instance) if x.compare])\n attributes_which_have_been_instantiated = [\n x for x in all_attributes_that_can_be_compared\n if hasattr(mmm_instance, x)\n ]\n return attributes_which_have_been_instantiated\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "lightweight_mmm.py"], "context_start_lineno": 0, "lineno": 197, "function_name": "_create_list_of_attributes_to_compare"}, "groundtruth": " all_attributes_that_can_be_compared = sorted(\n [x.name for x in dataclasses.fields(mmm_instance) if x.compare])\n attributes_which_have_been_instantiated = [\n x for x in all_attributes_that_can_be_compared\n if hasattr(mmm_instance, x)\n ]\n return attributes_which_have_been_instantiated\n"} +{"prompt": " trace: Sampling trace of the bayesian model once fitted.\n n_media_channels: Number of media channels the model was trained with.\n n_geos: Number of geos for geo models or 1 for national models.\n model_name: Name of the model.\n media: The media data the model is trained on. Usefull for a variety of\n insights post model fitting.\n media_names: Names of the media channels passed at fitting time.\n custom_priors: The set of custom priors the model was trained with. An empty\n dictionary if none were passed.\n \"\"\"\n model_name: str = \"hill_adstock\"\n n_media_channels: int = dataclasses.field(init=False, repr=False)\n n_geos: int = dataclasses.field(init=False, repr=False)\n media: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n media_names: Sequence[str] = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n trace: Dict[str, jnp.DeviceArray] = dataclasses.field(\n init=False, repr=False, hash=False, compare=False)\n custom_priors: MutableMapping[str, Prior] = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _degrees_seasonality: int = dataclasses.field(init=False, repr=False)\n _weekday_seasonality: bool = dataclasses.field(init=False, repr=False)\n _media_prior: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _extra_features: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _target: jnp.DeviceArray = dataclasses.field(\n init=False, repr=False, hash=False, compare=True)\n _train_media_size: int = dataclasses.field(\n init=False, repr=False, hash=True, compare=False)\n _mcmc: numpyro.infer.MCMC = dataclasses.field(\n init=False, repr=False, hash=False, compare=False)\n\n def __post_init__(self):\n if self.model_name not in _NAMES_TO_MODEL_TRANSFORMS:\n raise ValueError(\"Model name passed not valid. 
Please use any of the\"\n \"following: 'hill_adstock', 'adstock', 'carryover'.\")\n self._model_function = _MODEL_FUNCTION\n self._model_transform_function = _NAMES_TO_MODEL_TRANSFORMS[self.model_name]\n self._prior_names = models.MODEL_PRIORS_NAMES.union(\n models.TRANSFORM_PRIORS_NAMES[self.model_name])\n\n def __eq__(self, other: Any) -> bool:\n \"\"\"Equality method for LightweightMMMM.\n\n We need a special method here to handle a couple of issues. First, some of\n the attributes for LightweightMMM are arrays, which contain multiple values\n and cannot be evaluated with the default __eq__ method. Second, some\n attributes are initially undefined and only get values after fitting a\n model. The latter is dealt with within this function, and the former within\n the helper function _compare_equality_for_lmmm().\n\n Args:\n other: Dataclass to compare against.\n\n Returns:\n Boolean for whether self == other; NotImplemented if other is not a\n LightweightMMM.\n \"\"\"\n if not isinstance(other, LightweightMMM):\n return NotImplemented\n\n def _create_list_of_attributes_to_compare(\n mmm_instance: Any) -> Sequence[str]:\n all_attributes_that_can_be_compared = sorted(\n [x.name for x in dataclasses.fields(mmm_instance) if x.compare])\n attributes_which_have_been_instantiated = [\n x for x in all_attributes_that_can_be_compared\n if hasattr(mmm_instance, x)\n ]\n return attributes_which_have_been_instantiated\n\n self_attributes = _create_list_of_attributes_to_compare(self)\n other_attributes = _create_list_of_attributes_to_compare(other)\n\n return all(\n _compare_equality_for_lmmm(getattr(self, a1), getattr(other, a2))\n for a1, a2 in itertools.zip_longest(self_attributes, other_attributes))\n\n def _preprocess_custom_priors(\n self,\n custom_priors: Dict[str, Prior]) -> MutableMapping[str, Prior]:\n \"\"\"Preprocesses the user input custom priors to Numpyro distributions.\n\n If numpyro distributions are given they remains untouched, however if any\n other option is passed, it is passed to the default distribution to alter\n its constructor values.\n\n Args:\n custom_priors: Mapping of the name of the prior to its custom value.\n\n Returns:\n A mapping of names to numpyro distributions based on user input and\n default values.\n \"\"\"\n default_priors = {\n **models._get_default_priors(),\n **models._get_transform_default_priors()[self.model_name]\n }\n # Checking that the key is contained in custom_priors has already been done\n # at this point in the fit function.\n for prior_name in custom_priors:\n if isinstance(custom_priors[prior_name], numbers.Number):\n custom_priors[prior_name] = default_priors[prior_name].__class__(\n custom_priors[prior_name])\n elif (isinstance(custom_priors[prior_name], collections.abc.Sequence) and\n not isinstance(custom_priors[prior_name], str)):\n custom_priors[prior_name] = default_priors[prior_name].__class__(\n *custom_priors[prior_name])\n elif isinstance(custom_priors[prior_name], dict):\n custom_priors[prior_name] = default_priors[prior_name].__class__(\n **custom_priors[prior_name])\n elif not isinstance(custom_priors[prior_name], dist.Distribution):\n raise ValueError(\n \"Priors given must be a Numpyro distribution or one of the \"\n \"following to fit in the constructor of our default Numpyro \"\n \"distribution. It could be given as args or kwargs as long as it \"\n \"is the correct format for such object. 
Please refer to our \"\n \"documentation on custom priors to know more.\")\n return custom_priors\n\n def fit(\n self,\n media: jnp.ndarray,\n media_prior: jnp.ndarray,\n target: jnp.ndarray,\n extra_features: Optional[jnp.ndarray] = None,\n degrees_seasonality: int = 2,\n seasonality_frequency: int = 52,\n weekday_seasonality: bool = False,\n media_names: Optional[Sequence[str]] = None,\n number_warmup: int = 1000,\n number_samples: int = 1000,\n number_chains: int = 2,\n target_accept_prob: float = .85,\n init_strategy: Callable[[Mapping[Any, Any], Any],\n jnp.ndarray] = numpyro.infer.init_to_median,\n custom_priors: Optional[Dict[str, Prior]] = None,\n seed: Optional[int] = None) -> None:\n \"\"\"Fits MMM given the media data, extra features, costs and sales/KPI.\n\n For detailed information on the selected model please refer to its\n respective function in the models.py file.\n\n Args:\n media: Media input data. Media data must have either 2 dims for national\n model or 3 for geo models.\n media_prior: Costs of each media channel. The number of cost values must\n be equal to the number of media channels.\n target: Target KPI to use, like for example sales.\n extra_features: Other variables to add to the model.\n degrees_seasonality: Number of degrees to use for seasonality. Default is\n 2.\n seasonality_frequency: Frequency of the time period used. Default is 52 as\n in 52 weeks per year.\n weekday_seasonality: In case of daily data, also estimate seven weekday\n parameters.\n media_names: Names of the media channels passed.\n number_warmup: Number of warm up samples. Default is 1000.\n number_samples: Number of samples during sampling. Default is 1000.\n number_chains: Number of chains to sample. Default is 2.\n target_accept_prob: Target acceptance probability for step size in the\n NUTS sampler. Default is .85.\n init_strategy: Initialization function for numpyro NUTS. The available\n options can be found in\n https://num.pyro.ai/en/stable/utilities.html#initialization-strategies.\n Default is numpyro.infer.init_to_median.\n custom_priors: The custom priors we want the model to take instead of the\n default ones. Refer to the full documentation on custom priors for\n details.\n seed: Seed to use for PRNGKey during training. For better replicability\n run all different trainings with the same seed.\n \"\"\"\n if media.ndim not in (2, 3):\n raise ValueError(\n \"Media data must have either 2 dims for national model or 3 for geo \"\n \"models.\")\n if media.ndim == 3 and media_prior.ndim == 1:\n media_prior = jnp.expand_dims(media_prior, axis=-1)\n\n if media.shape[1] != len(media_prior):\n raise ValueError(\"The number of data channels provided must match the \"\n \"number of cost values.\")\n if media.min() < 0:\n raise ValueError(\"Media values must be greater or equal to zero.\")\n\n if custom_priors:\n not_used_custom_priors = set(custom_priors.keys()).difference(\n self._prior_names)\n if not_used_custom_priors:\n raise ValueError(\n \"The following passed custom priors dont have a match in the model.\"\n \" Please double check the names have been written correctly: %s\" %\n not_used_custom_priors)\n custom_priors = self._preprocess_custom_priors(\n custom_priors=custom_priors)\n geo_custom_priors = set(custom_priors.keys()).intersection(\n models.GEO_ONLY_PRIORS)\n if media.ndim == 2 and geo_custom_priors:\n raise ValueError(\n \"The given data is for national models but custom_prior contains \"\n \"priors for the geo version of the model. 
Please either remove geo \"\n \"priors for national model or pass media data with geo dimension.\")\n else:\n custom_priors = {}\n\n if weekday_seasonality and seasonality_frequency == 52:\n logging.warn(\"You have chosen daily seasonality and frequency 52 \"\n \"(weekly), please check you made the right seasonality \"\n \"choices.\")\n\n if extra_features is not None:\n extra_features = jnp.array(extra_features)\n\n if seed is None:\n seed = utils.get_time_seed()\n\n train_media_size = media.shape[0]\n kernel = numpyro.infer.NUTS(\n model=self._model_function,\n target_accept_prob=target_accept_prob,\n init_strategy=init_strategy)\n\n mcmc = numpyro.infer.MCMC(\n sampler=kernel,\n num_warmup=number_warmup,\n num_samples=number_samples,\n num_chains=number_chains)\n mcmc.run(\n rng_key=jax.random.PRNGKey(seed),\n media_data=jnp.array(media),\n extra_features=extra_features,\n target_data=jnp.array(target),\n media_prior=jnp.array(media_prior),\n degrees_seasonality=degrees_seasonality,\n frequency=seasonality_frequency,\n transform_function=self._model_transform_function,\n weekday_seasonality=weekday_seasonality,\n custom_priors=custom_priors)\n\n self.custom_priors = custom_priors\n if media_names is not None:\n self.media_names = media_names\n else:\n self.media_names = [f\"channel_{i}\" for i in range(media.shape[1])]\n self.n_media_channels = media.shape[1]\n self.n_geos = media.shape[2] if media.ndim == 3 else 1\n self._media_prior = media_prior\n self.trace = mcmc.get_samples()\n self._number_warmup = number_warmup\n self._number_samples = number_samples\n self._number_chains = number_chains\n self._target = target\n self._train_media_size = train_media_size\n self._degrees_seasonality = degrees_seasonality\n self._seasonality_frequency = seasonality_frequency\n self._weekday_seasonality = weekday_seasonality\n self.media = media\n self._extra_features = extra_features# jax-devicearray\n self._mcmc = mcmc\n logging.info(\"Model has been fitted\")\n\n def print_summary(self) -> None:\n \"\"\"Calls print_summary function from numpyro to print parameters summary.\n \"\"\"\n # TODO(): add name selection for print.\n self._mcmc.print_summary()\n\n @functools.partial(\n jax.jit,\n static_argnums=(0,),\n static_argnames=(\"degrees_seasonality\", \"weekday_seasonality\",\n \"transform_function\", \"model\"))\n def _predict(\n self,\n rng_key: jnp.ndarray,\n media_data: jnp.ndarray,\n extra_features: Optional[jnp.ndarray],\n media_prior: jnp.ndarray,\n degrees_seasonality: int, frequency: int,\n transform_function: Callable[[Any], jnp.ndarray],\n weekday_seasonality: bool,\n model: Callable[[Any], None],\n posterior_samples: Dict[str, jnp.ndarray],\n custom_priors: Dict[str, Prior]\n ) -> Dict[str, jnp.ndarray]:\n \"\"\"Encapsulates the numpyro.infer.Predictive function for predict method.\n\n It serves as a helper jitted function for running predictions.\n\n Args:\n rng_key: A jax.random.PRNGKey.\n media_data: Media array for needed for the model to run predictions.\n extra_features: Extra features for needed for the model to run.\n media_prior: Cost prior used for training the model.\n degrees_seasonality: Number of degrees for the seasonality.\n frequency: Frequency of the seasonality.\n transform_function: Media transform function to use within the model.\n weekday_seasonality: Allow daily weekday estimation.\n model: Numpyro model to use for numpyro.infer.Predictive.\n posterior_samples: Mapping of the posterior samples.\n custom_priors: The custom priors we want the model to take 
instead of the\n default ones. Refer to the full documentation on custom priors for\n details.\n\n Returns:\n The predictions for the given data.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/62", "ground_truth": " return infer.Predictive(\n model=model, posterior_samples=posterior_samples)(\n rng_key=rng_key,\n media_data=media_data,\n extra_features=extra_features,\n media_prior=media_prior,\n target_data=None,\n degrees_seasonality=degrees_seasonality,\n frequency=frequency,\n transform_function=transform_function,\n custom_priors=custom_priors,\n weekday_seasonality=weekday_seasonality)\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "lightweight_mmm.py"], "context_start_lineno": 132, "lineno": 441, "function_name": "_predict"}, "groundtruth": " return infer.Predictive(\n model=model, posterior_samples=posterior_samples)(\n rng_key=rng_key,\n media_data=media_data,\n extra_features=extra_features,\n media_prior=media_prior,\n target_data=None,\n degrees_seasonality=degrees_seasonality,\n frequency=frequency,\n transform_function=transform_function,\n custom_priors=custom_priors,\n weekday_seasonality=weekday_seasonality)\n"} +{"prompt": "\n\n def _preprocess_custom_priors(\n self,\n custom_priors: Dict[str, Prior]) -> MutableMapping[str, Prior]:\n \"\"\"Preprocesses the user input custom priors to Numpyro distributions.\n\n If numpyro distributions are given they remains untouched, however if any\n other option is passed, it is passed to the default distribution to alter\n its constructor values.\n\n Args:\n custom_priors: Mapping of the name of the prior to its custom value.\n\n Returns:\n A mapping of names to numpyro distributions based on user input and\n default values.\n \"\"\"\n default_priors = {\n **models._get_default_priors(),\n **models._get_transform_default_priors()[self.model_name]\n }\n # Checking that the key is contained in custom_priors has already been done\n # at this point in the fit function.\n for prior_name in custom_priors:\n if isinstance(custom_priors[prior_name], numbers.Number):\n custom_priors[prior_name] = default_priors[prior_name].__class__(\n custom_priors[prior_name])\n elif (isinstance(custom_priors[prior_name], collections.abc.Sequence) and\n not isinstance(custom_priors[prior_name], str)):\n custom_priors[prior_name] = default_priors[prior_name].__class__(\n *custom_priors[prior_name])\n elif isinstance(custom_priors[prior_name], dict):\n custom_priors[prior_name] = default_priors[prior_name].__class__(\n **custom_priors[prior_name])\n elif not isinstance(custom_priors[prior_name], dist.Distribution):\n raise ValueError(\n \"Priors given must be a Numpyro distribution or one of the \"\n \"following to fit in the constructor of our default Numpyro \"\n \"distribution. It could be given as args or kwargs as long as it \"\n \"is the correct format for such object. 
Please refer to our \"\n \"documentation on custom priors to know more.\")\n return custom_priors\n\n def fit(\n self,\n media: jnp.ndarray,\n media_prior: jnp.ndarray,\n target: jnp.ndarray,\n extra_features: Optional[jnp.ndarray] = None,\n degrees_seasonality: int = 2,\n seasonality_frequency: int = 52,\n weekday_seasonality: bool = False,\n media_names: Optional[Sequence[str]] = None,\n number_warmup: int = 1000,\n number_samples: int = 1000,\n number_chains: int = 2,\n target_accept_prob: float = .85,\n init_strategy: Callable[[Mapping[Any, Any], Any],\n jnp.ndarray] = numpyro.infer.init_to_median,\n custom_priors: Optional[Dict[str, Prior]] = None,\n seed: Optional[int] = None) -> None:\n \"\"\"Fits MMM given the media data, extra features, costs and sales/KPI.\n\n For detailed information on the selected model please refer to its\n respective function in the models.py file.\n\n Args:\n media: Media input data. Media data must have either 2 dims for national\n model or 3 for geo models.\n media_prior: Costs of each media channel. The number of cost values must\n be equal to the number of media channels.\n target: Target KPI to use, like for example sales.\n extra_features: Other variables to add to the model.\n degrees_seasonality: Number of degrees to use for seasonality. Default is\n 2.\n seasonality_frequency: Frequency of the time period used. Default is 52 as\n in 52 weeks per year.\n weekday_seasonality: In case of daily data, also estimate seven weekday\n parameters.\n media_names: Names of the media channels passed.\n number_warmup: Number of warm up samples. Default is 1000.\n number_samples: Number of samples during sampling. Default is 1000.\n number_chains: Number of chains to sample. Default is 2.\n target_accept_prob: Target acceptance probability for step size in the\n NUTS sampler. Default is .85.\n init_strategy: Initialization function for numpyro NUTS. The available\n options can be found in\n https://num.pyro.ai/en/stable/utilities.html#initialization-strategies.\n Default is numpyro.infer.init_to_median.\n custom_priors: The custom priors we want the model to take instead of the\n default ones. Refer to the full documentation on custom priors for\n details.\n seed: Seed to use for PRNGKey during training. For better replicability\n run all different trainings with the same seed.\n \"\"\"\n if media.ndim not in (2, 3):\n raise ValueError(\n \"Media data must have either 2 dims for national model or 3 for geo \"\n \"models.\")\n if media.ndim == 3 and media_prior.ndim == 1:\n media_prior = jnp.expand_dims(media_prior, axis=-1)\n\n if media.shape[1] != len(media_prior):\n raise ValueError(\"The number of data channels provided must match the \"\n \"number of cost values.\")\n if media.min() < 0:\n raise ValueError(\"Media values must be greater or equal to zero.\")\n\n if custom_priors:\n not_used_custom_priors = set(custom_priors.keys()).difference(\n self._prior_names)\n if not_used_custom_priors:\n raise ValueError(\n \"The following passed custom priors dont have a match in the model.\"\n \" Please double check the names have been written correctly: %s\" %\n not_used_custom_priors)\n custom_priors = self._preprocess_custom_priors(\n custom_priors=custom_priors)\n geo_custom_priors = set(custom_priors.keys()).intersection(\n models.GEO_ONLY_PRIORS)\n if media.ndim == 2 and geo_custom_priors:\n raise ValueError(\n \"The given data is for national models but custom_prior contains \"\n \"priors for the geo version of the model. 
Please either remove geo \"\n \"priors for national model or pass media data with geo dimension.\")\n else:\n custom_priors = {}\n\n if weekday_seasonality and seasonality_frequency == 52:\n logging.warn(\"You have chosen daily seasonality and frequency 52 \"\n \"(weekly), please check you made the right seasonality \"\n \"choices.\")\n\n if extra_features is not None:\n extra_features = jnp.array(extra_features)\n\n if seed is None:\n seed = utils.get_time_seed()\n\n train_media_size = media.shape[0]\n kernel = numpyro.infer.NUTS(\n model=self._model_function,\n target_accept_prob=target_accept_prob,\n init_strategy=init_strategy)\n\n mcmc = numpyro.infer.MCMC(\n sampler=kernel,\n num_warmup=number_warmup,\n num_samples=number_samples,\n num_chains=number_chains)\n mcmc.run(\n rng_key=jax.random.PRNGKey(seed),\n media_data=jnp.array(media),\n extra_features=extra_features,\n target_data=jnp.array(target),\n media_prior=jnp.array(media_prior),\n degrees_seasonality=degrees_seasonality,\n frequency=seasonality_frequency,\n transform_function=self._model_transform_function,\n weekday_seasonality=weekday_seasonality,\n custom_priors=custom_priors)\n\n self.custom_priors = custom_priors\n if media_names is not None:\n self.media_names = media_names\n else:\n self.media_names = [f\"channel_{i}\" for i in range(media.shape[1])]\n self.n_media_channels = media.shape[1]\n self.n_geos = media.shape[2] if media.ndim == 3 else 1\n self._media_prior = media_prior\n self.trace = mcmc.get_samples()\n self._number_warmup = number_warmup\n self._number_samples = number_samples\n self._number_chains = number_chains\n self._target = target\n self._train_media_size = train_media_size\n self._degrees_seasonality = degrees_seasonality\n self._seasonality_frequency = seasonality_frequency\n self._weekday_seasonality = weekday_seasonality\n self.media = media\n self._extra_features = extra_features# jax-devicearray\n self._mcmc = mcmc\n logging.info(\"Model has been fitted\")\n\n def print_summary(self) -> None:\n \"\"\"Calls print_summary function from numpyro to print parameters summary.\n \"\"\"\n # TODO(): add name selection for print.\n self._mcmc.print_summary()\n\n @functools.partial(\n jax.jit,\n static_argnums=(0,),\n static_argnames=(\"degrees_seasonality\", \"weekday_seasonality\",\n \"transform_function\", \"model\"))\n def _predict(\n self,\n rng_key: jnp.ndarray,\n media_data: jnp.ndarray,\n extra_features: Optional[jnp.ndarray],\n media_prior: jnp.ndarray,\n degrees_seasonality: int, frequency: int,\n transform_function: Callable[[Any], jnp.ndarray],\n weekday_seasonality: bool,\n model: Callable[[Any], None],\n posterior_samples: Dict[str, jnp.ndarray],\n custom_priors: Dict[str, Prior]\n ) -> Dict[str, jnp.ndarray]:\n \"\"\"Encapsulates the numpyro.infer.Predictive function for predict method.\n\n It serves as a helper jitted function for running predictions.\n\n Args:\n rng_key: A jax.random.PRNGKey.\n media_data: Media array for needed for the model to run predictions.\n extra_features: Extra features for needed for the model to run.\n media_prior: Cost prior used for training the model.\n degrees_seasonality: Number of degrees for the seasonality.\n frequency: Frequency of the seasonality.\n transform_function: Media transform function to use within the model.\n weekday_seasonality: Allow daily weekday estimation.\n model: Numpyro model to use for numpyro.infer.Predictive.\n posterior_samples: Mapping of the posterior samples.\n custom_priors: The custom priors we want the model to take 
instead of the\n default ones. Refer to the full documentation on custom priors for\n details.\n\n Returns:\n The predictions for the given data.\n \"\"\"\n return infer.Predictive(\n model=model, posterior_samples=posterior_samples)(\n rng_key=rng_key,\n media_data=media_data,\n extra_features=extra_features,\n media_prior=media_prior,\n target_data=None,\n degrees_seasonality=degrees_seasonality,\n frequency=frequency,\n transform_function=transform_function,\n custom_priors=custom_priors,\n weekday_seasonality=weekday_seasonality)\n\n def predict(\n self,\n media: jnp.ndarray,\n extra_features: Optional[jnp.ndarray] = None,\n media_gap: Optional[jnp.ndarray] = None,\n target_scaler: Optional[preprocessing.CustomScaler] = None,\n seed: Optional[int] = None\n ) -> jnp.ndarray:\n \"\"\"Runs the model to obtain predictions for the given input data.\n\n Predictions returned are distributions, if point estimates are desired one\n can calculate those based on the given distribution.\n\n Args:\n media: Media array for needed for the model to run predictions.\n extra_features: Extra features for needed for the model to run.\n media_gap: Media data gap between the end of training data and the start\n of the out of sample media given. Eg. if 100 weeks of data were used for\n training and prediction starts 2 months after training data finished we\n need to provide the 8 weeks missing between the training data and the\n prediction data so data transformations (adstock, carryover, ...) can\n take place correctly.\n target_scaler: Scaler that was used to scale the target before training.\n seed: Seed to use for PRNGKey during sampling. For replicability run\n this function and any other function that utilises predictions with the\n same seed.\n\n Returns:\n Predictions for the given media and extra features at a given date index.\n\n Raises:\n NotFittedModelError: When the model has not been fitted before running\n predict.\n \"\"\"\n if not hasattr(self, \"trace\"):\n raise NotFittedModelError(\"Need to fit the model before running \"\n \"predictions.\")\n if media_gap is not None:\n if media.ndim != media_gap.ndim:\n raise ValueError(\"Original media data and media gap must have the same \"\n \"number of dimensions.\")\n if media.ndim > 1 and media.shape[1] != media_gap.shape[1]:\n raise ValueError(\"Media gap must have the same numer of media channels\"\n \"as the original media data.\")\n previous_media = jnp.concatenate(arrays=[self.media, media_gap], axis=0)\n if extra_features is not None:\n previous_extra_features = jnp.concatenate(\n arrays=[\n self._extra_features,\n jnp.zeros((media_gap.shape[0], *self._extra_features.shape[1:]))\n ],\n axis=0)\n else:\n previous_media = self.media\n previous_extra_features = self._extra_features\n\n full_media = jnp.concatenate(arrays=[previous_media, media], axis=0)\n if extra_features is not None:\n full_extra_features = jnp.concatenate(\n arrays=[previous_extra_features, extra_features], axis=0)\n else:\n full_extra_features = None\n if seed is None:\n seed = utils.get_time_seed()\n prediction = self._predict(\n rng_key=jax.random.PRNGKey(seed=seed),\n media_data=full_media,\n extra_features=full_extra_features,\n media_prior=jnp.array(self._media_prior),\n degrees_seasonality=self._degrees_seasonality,\n frequency=self._seasonality_frequency,\n weekday_seasonality=self._weekday_seasonality,\n transform_function=self._model_transform_function,\n model=self._model_function,\n custom_priors=self.custom_priors,\n posterior_samples=self.trace)[\"mu\"][:, 
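Note on the record above: two details of `_predict` are worth isolating — `functools.partial(jax.jit, static_argnums=(0,), ...)` marks `self` and the model callable as static so JAX can trace the method, and `numpyro.infer.Predictive` replays posterior draws through the model with the target set to `None` so predictions get sampled. A hedged sketch of the same pattern on a toy model (all names here are illustrative assumptions):

    import functools
    import jax
    import jax.numpy as jnp
    import numpyro
    import numpyro.distributions as dist
    from numpyro import infer

    def toy_model(x, y=None):
        w = numpyro.sample("w", dist.Normal(0.0, 1.0))
        numpyro.sample("obs", dist.Normal(w * x, 0.1), obs=y)

    class ToyPredictor:
        @functools.partial(jax.jit, static_argnums=(0,), static_argnames=("model",))
        def _predict(self, rng_key, x, model, posterior_samples):
            # Predictive resamples the observed site ("obs") given posterior draws.
            return infer.Predictive(model=model, posterior_samples=posterior_samples)(
                rng_key=rng_key, x=x, y=None)["obs"]

    draws = {"w": jnp.ones((100,))}               # pretend posterior draws
    preds = ToyPredictor()._predict(
        jax.random.PRNGKey(0), jnp.arange(3.0), model=toy_model,
        posterior_samples=draws)                  # shape (100, 3)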
previous_media.shape[0]:]\n if target_scaler:\n prediction = target_scaler.inverse_transform(prediction)\n\n return prediction\n\n def reduce_trace(self, nsample: int = 100, seed: int = 0) -> None:\n \"\"\"Reduces the samples in `trace` to speed up `predict` and optimize.\n\n Please note this step is not reversible. Only do this after you have\n investigated convergence of the model.\n\n Args:\n nsample: Target number of samples.\n seed: Random seed for down sampling.\n\n Raises:\n ValueError: if `nsample` is too big.\n \"\"\"", "metadata": {"task_id": "google--lightweight_mmm/63", "ground_truth": " ntrace = len(self.trace[\"sigma\"])\n if ntrace < nsample:\n raise ValueError(\"nsample is bigger than the actual posterior samples\")\n key = jax.random.PRNGKey(seed)\n samples = jax.random.choice(key, ntrace, (nsample,), replace=False)\n for name in self.trace.keys():\n self.trace[name] = self.trace[name][samples]\n logging.info(\"Reduction is complete\")\n", "fpath_tuple": ["google_lightweight_mmm", "lightweight_mmm", "lightweight_mmm.py"], "context_start_lineno": 210, "lineno": 548, "function_name": "reduce_trace"}, "groundtruth": " ntrace = len(self.trace[\"sigma\"])\n if ntrace < nsample:\n raise ValueError(\"nsample is bigger than the actual posterior samples\")\n key = jax.random.PRNGKey(seed)\n samples = jax.random.choice(key, ntrace, (nsample,), replace=False)\n for name in self.trace.keys():\n self.trace[name] = self.trace[name][samples]\n logging.info(\"Reduction is complete\")\n"} +{"prompt": "import abc\nfrom typing import Union\n\nimport numpy as np\nimport torch\nimport tqdm\n\n\nclass IdentitySampler:\n def run(\n self, features: Union[torch.Tensor, np.ndarray]\n ) -> Union[torch.Tensor, np.ndarray]:\n return features\n\n\nclass BaseSampler(abc.ABC):\n def __init__(self, percentage: float):", "metadata": {"task_id": "amazon-science--patchcore-inspection/0", "ground_truth": " if not 0 < percentage < 1:\n raise ValueError(\"Percentage value not in (0, 1).\")\n self.percentage = percentage\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "sampler.py"], "context_start_lineno": 0, "lineno": 17, "function_name": "__init__"}, "groundtruth": " if not 0 < percentage < 1:\n raise ValueError(\"Percentage value not in (0, 1).\")\n self.percentage = percentage\n"} +{"prompt": "import abc\nfrom typing import Union\n\nimport numpy as np\nimport torch\nimport tqdm\n\n\nclass IdentitySampler:\n def run(\n self, features: Union[torch.Tensor, np.ndarray]\n ) -> Union[torch.Tensor, np.ndarray]:\n return features\n\n\nclass BaseSampler(abc.ABC):\n def __init__(self, percentage: float):\n if not 0 < percentage < 1:\n raise ValueError(\"Percentage value not in (0, 1).\")\n self.percentage = percentage\n\n @abc.abstractmethod\n def run(\n self, features: Union[torch.Tensor, np.ndarray]\n ) -> Union[torch.Tensor, np.ndarray]:\n pass\n\n def _store_type(self, features: Union[torch.Tensor, np.ndarray]) -> None:\n self.features_is_numpy = isinstance(features, np.ndarray)\n if not self.features_is_numpy:\n self.features_device = features.device\n\n def _restore_type(self, features: torch.Tensor) -> Union[torch.Tensor, np.ndarray]:\n if self.features_is_numpy:\n return features.cpu().numpy()\n return features.to(self.features_device)\n\n\nclass GreedyCoresetSampler(BaseSampler):\n def __init__(\n self,\n percentage: float,\n device: torch.device,\n dimension_to_project_features_to=128,\n ):\n \"\"\"Greedy Coreset sampling base class.\"\"\"", "metadata": {"task_id": 
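Note on the `reduce_trace` ground truth above: it thins every posterior site with one shared index draw, so parameter draws stay aligned across sites. The down-sampling step in isolation (the `trace` dict here is fabricated for illustration):

    import jax
    import jax.numpy as jnp

    trace = {"sigma": jnp.ones((1000,)), "mu": jnp.zeros((1000, 3))}  # toy posterior
    nsample, seed = 100, 0

    ntrace = len(trace["sigma"])
    if ntrace < nsample:
        raise ValueError("nsample is bigger than the actual posterior samples")
    key = jax.random.PRNGKey(seed)
    # One index vector reused for every site keeps draws consistent across parameters.
    samples = jax.random.choice(key, ntrace, (nsample,), replace=False)
    trace = {name: value[samples] for name, value in trace.items()}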
"amazon-science--patchcore-inspection/1", "ground_truth": " super().__init__(percentage)\n\n self.device = device\n self.dimension_to_project_features_to = dimension_to_project_features_to\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "sampler.py"], "context_start_lineno": 0, "lineno": 46, "function_name": "__init__"}, "groundtruth": " super().__init__(percentage)\n\n self.device = device\n self.dimension_to_project_features_to = dimension_to_project_features_to\n"} +{"prompt": "\"\"\"PatchCore and PatchCore detection methods.\"\"\"\nimport logging\nimport os\nimport pickle\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport tqdm\n\nimport patchcore\nimport patchcore.backbones\nimport patchcore.common\nimport patchcore.sampler\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass PatchCore(torch.nn.Module):\n def __init__(self, device):\n \"\"\"PatchCore anomaly detection class.\"\"\"\n super(PatchCore, self).__init__()\n self.device = device\n\n def load(\n self,\n backbone,\n layers_to_extract_from,\n device,\n input_shape,\n pretrain_embed_dimension,\n target_embed_dimension,\n patchsize=3,\n patchstride=1,\n anomaly_score_num_nn=1,\n featuresampler=patchcore.sampler.IdentitySampler(),\n nn_method=patchcore.common.FaissNN(False, 4),\n **kwargs,\n ):\n self.backbone = backbone.to(device)\n self.layers_to_extract_from = layers_to_extract_from\n self.input_shape = input_shape\n\n self.device = device\n self.patch_maker = PatchMaker(patchsize, stride=patchstride)\n\n self.forward_modules = torch.nn.ModuleDict({})\n\n feature_aggregator = patchcore.common.NetworkFeatureAggregator(\n self.backbone, self.layers_to_extract_from, self.device\n )\n feature_dimensions = feature_aggregator.feature_dimensions(input_shape)\n self.forward_modules[\"feature_aggregator\"] = feature_aggregator\n\n preprocessing = patchcore.common.Preprocessing(\n feature_dimensions, pretrain_embed_dimension\n )\n self.forward_modules[\"preprocessing\"] = preprocessing\n\n self.target_embed_dimension = target_embed_dimension\n preadapt_aggregator = patchcore.common.Aggregator(\n target_dim=target_embed_dimension\n )\n\n _ = preadapt_aggregator.to(self.device)\n\n self.forward_modules[\"preadapt_aggregator\"] = preadapt_aggregator\n\n self.anomaly_scorer = patchcore.common.NearestNeighbourScorer(\n n_nearest_neighbours=anomaly_score_num_nn, nn_method=nn_method\n )\n\n self.anomaly_segmentor = patchcore.common.RescaleSegmentor(\n device=self.device, target_size=input_shape[-2:]\n )\n\n self.featuresampler = featuresampler\n\n def embed(self, data):\n if isinstance(data, torch.utils.data.DataLoader):\n features = []\n for image in data:\n if isinstance(image, dict):\n image = image[\"image\"]\n with torch.no_grad():\n input_image = image.to(torch.float).to(self.device)\n features.append(self._embed(input_image))\n return features\n return self._embed(data)\n\n def _embed(self, images, detach=True, provide_patch_shapes=False):\n \"\"\"Returns feature embeddings for images.\"\"\"\n\n def _detach(features):", "metadata": {"task_id": "amazon-science--patchcore-inspection/2", "ground_truth": " if detach:\n return [x.detach().cpu().numpy() for x in features]\n return features\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "patchcore.py"], "context_start_lineno": 0, "lineno": 94, "function_name": "_detach"}, "groundtruth": " if detach:\n return [x.detach().cpu().numpy() for x in features]\n return features\n"} +{"prompt": "\"\"\"PatchCore and 
PatchCore detection methods.\"\"\"\nimport logging\nimport os\nimport pickle\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport tqdm\n\nimport patchcore\nimport patchcore.backbones\nimport patchcore.common\nimport patchcore.sampler\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass PatchCore(torch.nn.Module):\n def __init__(self, device):\n \"\"\"PatchCore anomaly detection class.\"\"\"\n super(PatchCore, self).__init__()\n self.device = device\n\n def load(\n self,\n backbone,\n layers_to_extract_from,\n device,\n input_shape,\n pretrain_embed_dimension,\n target_embed_dimension,\n patchsize=3,\n patchstride=1,\n anomaly_score_num_nn=1,\n featuresampler=patchcore.sampler.IdentitySampler(),\n nn_method=patchcore.common.FaissNN(False, 4),\n **kwargs,\n ):\n self.backbone = backbone.to(device)\n self.layers_to_extract_from = layers_to_extract_from\n self.input_shape = input_shape\n\n self.device = device\n self.patch_maker = PatchMaker(patchsize, stride=patchstride)\n\n self.forward_modules = torch.nn.ModuleDict({})\n\n feature_aggregator = patchcore.common.NetworkFeatureAggregator(\n self.backbone, self.layers_to_extract_from, self.device\n )\n feature_dimensions = feature_aggregator.feature_dimensions(input_shape)\n self.forward_modules[\"feature_aggregator\"] = feature_aggregator\n\n preprocessing = patchcore.common.Preprocessing(\n feature_dimensions, pretrain_embed_dimension\n )\n self.forward_modules[\"preprocessing\"] = preprocessing\n\n self.target_embed_dimension = target_embed_dimension\n preadapt_aggregator = patchcore.common.Aggregator(\n target_dim=target_embed_dimension\n )\n\n _ = preadapt_aggregator.to(self.device)\n\n self.forward_modules[\"preadapt_aggregator\"] = preadapt_aggregator\n\n self.anomaly_scorer = patchcore.common.NearestNeighbourScorer(\n n_nearest_neighbours=anomaly_score_num_nn, nn_method=nn_method\n )\n\n self.anomaly_segmentor = patchcore.common.RescaleSegmentor(\n device=self.device, target_size=input_shape[-2:]\n )\n\n self.featuresampler = featuresampler\n\n def embed(self, data):\n if isinstance(data, torch.utils.data.DataLoader):\n features = []\n for image in data:\n if isinstance(image, dict):\n image = image[\"image\"]\n with torch.no_grad():\n input_image = image.to(torch.float).to(self.device)\n features.append(self._embed(input_image))\n return features\n return self._embed(data)\n\n def _embed(self, images, detach=True, provide_patch_shapes=False):\n \"\"\"Returns feature embeddings for images.\"\"\"\n\n def _detach(features):\n if detach:\n return [x.detach().cpu().numpy() for x in features]\n return features\n\n _ = self.forward_modules[\"feature_aggregator\"].eval()\n with torch.no_grad():\n features = self.forward_modules[\"feature_aggregator\"](images)\n\n features = [features[layer] for layer in self.layers_to_extract_from]\n\n features = [\n self.patch_maker.patchify(x, return_spatial_info=True) for x in features\n ]\n patch_shapes = [x[1] for x in features]\n features = [x[0] for x in features]\n ref_num_patches = patch_shapes[0]\n\n for i in range(1, len(features)):\n _features = features[i]\n patch_dims = patch_shapes[i]\n\n # TODO(pgehler): Add comments\n _features = _features.reshape(\n _features.shape[0], patch_dims[0], patch_dims[1], *_features.shape[2:]\n )\n _features = _features.permute(0, -3, -2, -1, 1, 2)\n perm_base_shape = _features.shape\n _features = _features.reshape(-1, *_features.shape[-2:])\n _features = F.interpolate(\n _features.unsqueeze(1),\n size=(ref_num_patches[0], 
ref_num_patches[1]),\n mode=\"bilinear\",\n align_corners=False,\n )\n _features = _features.squeeze(1)\n _features = _features.reshape(\n *perm_base_shape[:-2], ref_num_patches[0], ref_num_patches[1]\n )\n _features = _features.permute(0, -2, -1, 1, 2, 3)\n _features = _features.reshape(len(_features), -1, *_features.shape[-3:])\n features[i] = _features\n features = [x.reshape(-1, *x.shape[-3:]) for x in features]\n\n # As different feature backbones & patching provide differently\n # sized features, these are brought into the correct form here.\n features = self.forward_modules[\"preprocessing\"](features)\n features = self.forward_modules[\"preadapt_aggregator\"](features)\n\n if provide_patch_shapes:\n return _detach(features), patch_shapes\n return _detach(features)\n\n def fit(self, training_data):\n \"\"\"PatchCore training.\n\n This function computes the embeddings of the training data and fills the\n memory bank of SPADE.\n \"\"\"\n self._fill_memory_bank(training_data)\n\n def _fill_memory_bank(self, input_data):\n \"\"\"Computes and sets the support features for SPADE.\"\"\"", "metadata": {"task_id": "amazon-science--patchcore-inspection/3", "ground_truth": " _ = self.forward_modules.eval()\n\n def _image_to_features(input_image):\n with torch.no_grad():\n input_image = input_image.to(torch.float).to(self.device)\n return self._embed(input_image)\n\n features = []\n with tqdm.tqdm(\n input_data, desc=\"Computing support features...\", position=1, leave=False\n ) as data_iterator:\n for image in data_iterator:\n if isinstance(image, dict):\n image = image[\"image\"]\n features.append(_image_to_features(image))\n\n features = np.concatenate(features, axis=0)\n features = self.featuresampler.run(features)\n\n self.anomaly_scorer.fit(detection_features=[features])\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "patchcore.py"], "context_start_lineno": 0, "lineno": 156, "function_name": "_fill_memory_bank"}, "groundtruth": " _ = self.forward_modules.eval()\n\n def _image_to_features(input_image):\n with torch.no_grad():\n input_image = input_image.to(torch.float).to(self.device)\n return self._embed(input_image)\n\n features = []\n with tqdm.tqdm(\n input_data, desc=\"Computing support features...\", position=1, leave=False\n ) as data_iterator:\n for image in data_iterator:\n if isinstance(image, dict):\n image = image[\"image\"]\n features.append(_image_to_features(image))\n\n features = np.concatenate(features, axis=0)\n features = self.featuresampler.run(features)\n\n self.anomaly_scorer.fit(detection_features=[features])\n"} +{"prompt": "\"\"\"PatchCore and PatchCore detection methods.\"\"\"\nimport logging\nimport os\nimport pickle\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport tqdm\n\nimport patchcore\nimport patchcore.backbones\nimport patchcore.common\nimport patchcore.sampler\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass PatchCore(torch.nn.Module):\n def __init__(self, device):\n \"\"\"PatchCore anomaly detection class.\"\"\"\n super(PatchCore, self).__init__()\n self.device = device\n\n def load(\n self,\n backbone,\n layers_to_extract_from,\n device,\n input_shape,\n pretrain_embed_dimension,\n target_embed_dimension,\n patchsize=3,\n patchstride=1,\n anomaly_score_num_nn=1,\n featuresampler=patchcore.sampler.IdentitySampler(),\n nn_method=patchcore.common.FaissNN(False, 4),\n **kwargs,\n ):\n self.backbone = backbone.to(device)\n self.layers_to_extract_from = layers_to_extract_from\n self.input_shape = 
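Note on the feature-alignment loop above: `_embed` upsamples every deeper layer's patch grid to the first layer's resolution with bilinear `F.interpolate` before concatenating them. The shape gymnastics reduce to this core step (a simplified sketch; the real tensors carry extra patch dimensions):

    import torch
    import torch.nn.functional as F

    coarse = torch.randn(2, 512, 14, 14)   # deeper layer: fewer spatial positions
    ref_hw = (28, 28)                      # reference grid from the first layer
    aligned = F.interpolate(coarse, size=ref_hw, mode="bilinear", align_corners=False)
    print(aligned.shape)                   # torch.Size([2, 512, 28, 28])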
input_shape\n\n self.device = device\n self.patch_maker = PatchMaker(patchsize, stride=patchstride)\n\n self.forward_modules = torch.nn.ModuleDict({})\n\n feature_aggregator = patchcore.common.NetworkFeatureAggregator(\n self.backbone, self.layers_to_extract_from, self.device\n )\n feature_dimensions = feature_aggregator.feature_dimensions(input_shape)\n self.forward_modules[\"feature_aggregator\"] = feature_aggregator\n\n preprocessing = patchcore.common.Preprocessing(\n feature_dimensions, pretrain_embed_dimension\n )\n self.forward_modules[\"preprocessing\"] = preprocessing\n\n self.target_embed_dimension = target_embed_dimension\n preadapt_aggregator = patchcore.common.Aggregator(\n target_dim=target_embed_dimension\n )\n\n _ = preadapt_aggregator.to(self.device)\n\n self.forward_modules[\"preadapt_aggregator\"] = preadapt_aggregator\n\n self.anomaly_scorer = patchcore.common.NearestNeighbourScorer(\n n_nearest_neighbours=anomaly_score_num_nn, nn_method=nn_method\n )\n\n self.anomaly_segmentor = patchcore.common.RescaleSegmentor(\n device=self.device, target_size=input_shape[-2:]\n )\n\n self.featuresampler = featuresampler\n\n def embed(self, data):\n if isinstance(data, torch.utils.data.DataLoader):\n features = []\n for image in data:\n if isinstance(image, dict):\n image = image[\"image\"]\n with torch.no_grad():\n input_image = image.to(torch.float).to(self.device)\n features.append(self._embed(input_image))\n return features\n return self._embed(data)\n\n def _embed(self, images, detach=True, provide_patch_shapes=False):\n \"\"\"Returns feature embeddings for images.\"\"\"\n\n def _detach(features):\n if detach:\n return [x.detach().cpu().numpy() for x in features]\n return features\n\n _ = self.forward_modules[\"feature_aggregator\"].eval()\n with torch.no_grad():\n features = self.forward_modules[\"feature_aggregator\"](images)\n\n features = [features[layer] for layer in self.layers_to_extract_from]\n\n features = [\n self.patch_maker.patchify(x, return_spatial_info=True) for x in features\n ]\n patch_shapes = [x[1] for x in features]\n features = [x[0] for x in features]\n ref_num_patches = patch_shapes[0]\n\n for i in range(1, len(features)):\n _features = features[i]\n patch_dims = patch_shapes[i]\n\n # TODO(pgehler): Add comments\n _features = _features.reshape(\n _features.shape[0], patch_dims[0], patch_dims[1], *_features.shape[2:]\n )\n _features = _features.permute(0, -3, -2, -1, 1, 2)\n perm_base_shape = _features.shape\n _features = _features.reshape(-1, *_features.shape[-2:])\n _features = F.interpolate(\n _features.unsqueeze(1),\n size=(ref_num_patches[0], ref_num_patches[1]),\n mode=\"bilinear\",\n align_corners=False,\n )\n _features = _features.squeeze(1)\n _features = _features.reshape(\n *perm_base_shape[:-2], ref_num_patches[0], ref_num_patches[1]\n )\n _features = _features.permute(0, -2, -1, 1, 2, 3)\n _features = _features.reshape(len(_features), -1, *_features.shape[-3:])\n features[i] = _features\n features = [x.reshape(-1, *x.shape[-3:]) for x in features]\n\n # As different feature backbones & patching provide differently\n # sized features, these are brought into the correct form here.\n features = self.forward_modules[\"preprocessing\"](features)\n features = self.forward_modules[\"preadapt_aggregator\"](features)\n\n if provide_patch_shapes:\n return _detach(features), patch_shapes\n return _detach(features)\n\n def fit(self, training_data):\n \"\"\"PatchCore training.\n\n This function computes the embeddings of the training data and fills the\n 
memory bank of SPADE.\n \"\"\"\n self._fill_memory_bank(training_data)\n\n def _fill_memory_bank(self, input_data):\n \"\"\"Computes and sets the support features for SPADE.\"\"\"\n _ = self.forward_modules.eval()\n\n def _image_to_features(input_image):", "metadata": {"task_id": "amazon-science--patchcore-inspection/4", "ground_truth": " with torch.no_grad():\n input_image = input_image.to(torch.float).to(self.device)\n return self._embed(input_image)\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "patchcore.py"], "context_start_lineno": 0, "lineno": 159, "function_name": "_image_to_features"}, "groundtruth": " with torch.no_grad():\n input_image = input_image.to(torch.float).to(self.device)\n return self._embed(input_image)\n"} +{"prompt": "\"\"\"PatchCore and PatchCore detection methods.\"\"\"\nimport logging\nimport os\nimport pickle\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport tqdm\n\nimport patchcore\nimport patchcore.backbones\nimport patchcore.common\nimport patchcore.sampler\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass PatchCore(torch.nn.Module):\n def __init__(self, device):\n \"\"\"PatchCore anomaly detection class.\"\"\"\n super(PatchCore, self).__init__()\n self.device = device\n\n def load(\n self,\n backbone,\n layers_to_extract_from,\n device,\n input_shape,\n pretrain_embed_dimension,\n target_embed_dimension,\n patchsize=3,\n patchstride=1,\n anomaly_score_num_nn=1,\n featuresampler=patchcore.sampler.IdentitySampler(),\n nn_method=patchcore.common.FaissNN(False, 4),\n **kwargs,\n ):\n self.backbone = backbone.to(device)\n self.layers_to_extract_from = layers_to_extract_from\n self.input_shape = input_shape\n\n self.device = device\n self.patch_maker = PatchMaker(patchsize, stride=patchstride)\n\n self.forward_modules = torch.nn.ModuleDict({})\n\n feature_aggregator = patchcore.common.NetworkFeatureAggregator(\n self.backbone, self.layers_to_extract_from, self.device\n )\n feature_dimensions = feature_aggregator.feature_dimensions(input_shape)\n self.forward_modules[\"feature_aggregator\"] = feature_aggregator\n\n preprocessing = patchcore.common.Preprocessing(\n feature_dimensions, pretrain_embed_dimension\n )\n self.forward_modules[\"preprocessing\"] = preprocessing\n\n self.target_embed_dimension = target_embed_dimension\n preadapt_aggregator = patchcore.common.Aggregator(\n target_dim=target_embed_dimension\n )\n\n _ = preadapt_aggregator.to(self.device)\n\n self.forward_modules[\"preadapt_aggregator\"] = preadapt_aggregator\n\n self.anomaly_scorer = patchcore.common.NearestNeighbourScorer(\n n_nearest_neighbours=anomaly_score_num_nn, nn_method=nn_method\n )\n\n self.anomaly_segmentor = patchcore.common.RescaleSegmentor(\n device=self.device, target_size=input_shape[-2:]\n )\n\n self.featuresampler = featuresampler\n\n def embed(self, data):\n if isinstance(data, torch.utils.data.DataLoader):\n features = []\n for image in data:\n if isinstance(image, dict):\n image = image[\"image\"]\n with torch.no_grad():\n input_image = image.to(torch.float).to(self.device)\n features.append(self._embed(input_image))\n return features\n return self._embed(data)\n\n def _embed(self, images, detach=True, provide_patch_shapes=False):\n \"\"\"Returns feature embeddings for images.\"\"\"\n\n def _detach(features):\n if detach:\n return [x.detach().cpu().numpy() for x in features]\n return features\n\n _ = self.forward_modules[\"feature_aggregator\"].eval()\n with torch.no_grad():\n features = 
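Note on the `_image_to_features` ground truth above: it is the canonical inference-embedding pattern — switch modules to eval mode, disable autograd, cast and move the batch, then embed. The same pattern in isolation (toy module, illustrative only):

    import torch

    net = torch.nn.Sequential(torch.nn.Conv2d(3, 8, 3), torch.nn.ReLU())
    net.eval()                             # freeze batchnorm/dropout behaviour
    with torch.no_grad():                  # no autograd graph during embedding
        feats = net(torch.randn(1, 3, 32, 32).to(torch.float))
    print(feats.shape)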
self.forward_modules[\"feature_aggregator\"](images)\n\n features = [features[layer] for layer in self.layers_to_extract_from]\n\n features = [\n self.patch_maker.patchify(x, return_spatial_info=True) for x in features\n ]\n patch_shapes = [x[1] for x in features]\n features = [x[0] for x in features]\n ref_num_patches = patch_shapes[0]\n\n for i in range(1, len(features)):\n _features = features[i]\n patch_dims = patch_shapes[i]\n\n # TODO(pgehler): Add comments\n _features = _features.reshape(\n _features.shape[0], patch_dims[0], patch_dims[1], *_features.shape[2:]\n )\n _features = _features.permute(0, -3, -2, -1, 1, 2)\n perm_base_shape = _features.shape\n _features = _features.reshape(-1, *_features.shape[-2:])\n _features = F.interpolate(\n _features.unsqueeze(1),\n size=(ref_num_patches[0], ref_num_patches[1]),\n mode=\"bilinear\",\n align_corners=False,\n )\n _features = _features.squeeze(1)\n _features = _features.reshape(\n *perm_base_shape[:-2], ref_num_patches[0], ref_num_patches[1]\n )\n _features = _features.permute(0, -2, -1, 1, 2, 3)\n _features = _features.reshape(len(_features), -1, *_features.shape[-3:])\n features[i] = _features\n features = [x.reshape(-1, *x.shape[-3:]) for x in features]\n\n # As different feature backbones & patching provide differently\n # sized features, these are brought into the correct form here.\n features = self.forward_modules[\"preprocessing\"](features)\n features = self.forward_modules[\"preadapt_aggregator\"](features)\n\n if provide_patch_shapes:\n return _detach(features), patch_shapes\n return _detach(features)\n\n def fit(self, training_data):\n \"\"\"PatchCore training.\n\n This function computes the embeddings of the training data and fills the\n memory bank of SPADE.\n \"\"\"\n self._fill_memory_bank(training_data)\n\n def _fill_memory_bank(self, input_data):\n \"\"\"Computes and sets the support features for SPADE.\"\"\"\n _ = self.forward_modules.eval()\n\n def _image_to_features(input_image):\n with torch.no_grad():\n input_image = input_image.to(torch.float).to(self.device)\n return self._embed(input_image)\n\n features = []\n with tqdm.tqdm(\n input_data, desc=\"Computing support features...\", position=1, leave=False\n ) as data_iterator:\n for image in data_iterator:\n if isinstance(image, dict):\n image = image[\"image\"]\n features.append(_image_to_features(image))\n\n features = np.concatenate(features, axis=0)\n features = self.featuresampler.run(features)\n\n self.anomaly_scorer.fit(detection_features=[features])\n\n def predict(self, data):", "metadata": {"task_id": "amazon-science--patchcore-inspection/5", "ground_truth": " if isinstance(data, torch.utils.data.DataLoader):\n return self._predict_dataloader(data)\n return self._predict(data)\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "patchcore.py"], "context_start_lineno": 0, "lineno": 178, "function_name": "predict"}, "groundtruth": " if isinstance(data, torch.utils.data.DataLoader):\n return self._predict_dataloader(data)\n return self._predict(data)\n"} +{"prompt": "\"\"\"PatchCore and PatchCore detection methods.\"\"\"\nimport logging\nimport os\nimport pickle\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport tqdm\n\nimport patchcore\nimport patchcore.backbones\nimport patchcore.common\nimport patchcore.sampler\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass PatchCore(torch.nn.Module):\n def __init__(self, device):\n \"\"\"PatchCore anomaly detection class.\"\"\"\n super(PatchCore, 
self).__init__()\n self.device = device\n\n def load(\n self,\n backbone,\n layers_to_extract_from,\n device,\n input_shape,\n pretrain_embed_dimension,\n target_embed_dimension,\n patchsize=3,\n patchstride=1,\n anomaly_score_num_nn=1,\n featuresampler=patchcore.sampler.IdentitySampler(),\n nn_method=patchcore.common.FaissNN(False, 4),\n **kwargs,\n ):\n self.backbone = backbone.to(device)\n self.layers_to_extract_from = layers_to_extract_from\n self.input_shape = input_shape\n\n self.device = device\n self.patch_maker = PatchMaker(patchsize, stride=patchstride)\n\n self.forward_modules = torch.nn.ModuleDict({})\n\n feature_aggregator = patchcore.common.NetworkFeatureAggregator(\n self.backbone, self.layers_to_extract_from, self.device\n )\n feature_dimensions = feature_aggregator.feature_dimensions(input_shape)\n self.forward_modules[\"feature_aggregator\"] = feature_aggregator\n\n preprocessing = patchcore.common.Preprocessing(\n feature_dimensions, pretrain_embed_dimension\n )\n self.forward_modules[\"preprocessing\"] = preprocessing\n\n self.target_embed_dimension = target_embed_dimension\n preadapt_aggregator = patchcore.common.Aggregator(\n target_dim=target_embed_dimension\n )\n\n _ = preadapt_aggregator.to(self.device)\n\n self.forward_modules[\"preadapt_aggregator\"] = preadapt_aggregator\n\n self.anomaly_scorer = patchcore.common.NearestNeighbourScorer(\n n_nearest_neighbours=anomaly_score_num_nn, nn_method=nn_method\n )\n\n self.anomaly_segmentor = patchcore.common.RescaleSegmentor(\n device=self.device, target_size=input_shape[-2:]\n )\n\n self.featuresampler = featuresampler\n\n def embed(self, data):\n if isinstance(data, torch.utils.data.DataLoader):\n features = []\n for image in data:\n if isinstance(image, dict):\n image = image[\"image\"]\n with torch.no_grad():\n input_image = image.to(torch.float).to(self.device)\n features.append(self._embed(input_image))\n return features\n return self._embed(data)\n\n def _embed(self, images, detach=True, provide_patch_shapes=False):\n \"\"\"Returns feature embeddings for images.\"\"\"\n\n def _detach(features):\n if detach:\n return [x.detach().cpu().numpy() for x in features]\n return features\n\n _ = self.forward_modules[\"feature_aggregator\"].eval()\n with torch.no_grad():\n features = self.forward_modules[\"feature_aggregator\"](images)\n\n features = [features[layer] for layer in self.layers_to_extract_from]\n\n features = [\n self.patch_maker.patchify(x, return_spatial_info=True) for x in features\n ]\n patch_shapes = [x[1] for x in features]\n features = [x[0] for x in features]\n ref_num_patches = patch_shapes[0]\n\n for i in range(1, len(features)):\n _features = features[i]\n patch_dims = patch_shapes[i]\n\n # TODO(pgehler): Add comments\n _features = _features.reshape(\n _features.shape[0], patch_dims[0], patch_dims[1], *_features.shape[2:]\n )\n _features = _features.permute(0, -3, -2, -1, 1, 2)\n perm_base_shape = _features.shape\n _features = _features.reshape(-1, *_features.shape[-2:])\n _features = F.interpolate(\n _features.unsqueeze(1),\n size=(ref_num_patches[0], ref_num_patches[1]),\n mode=\"bilinear\",\n align_corners=False,\n )\n _features = _features.squeeze(1)\n _features = _features.reshape(\n *perm_base_shape[:-2], ref_num_patches[0], ref_num_patches[1]\n )\n _features = _features.permute(0, -2, -1, 1, 2, 3)\n _features = _features.reshape(len(_features), -1, *_features.shape[-3:])\n features[i] = _features\n features = [x.reshape(-1, *x.shape[-3:]) for x in features]\n\n # As different feature backbones 
& patching provide differently\n # sized features, these are brought into the correct form here.\n features = self.forward_modules[\"preprocessing\"](features)\n features = self.forward_modules[\"preadapt_aggregator\"](features)\n\n if provide_patch_shapes:\n return _detach(features), patch_shapes\n return _detach(features)\n\n def fit(self, training_data):\n \"\"\"PatchCore training.\n\n This function computes the embeddings of the training data and fills the\n memory bank of SPADE.\n \"\"\"\n self._fill_memory_bank(training_data)\n\n def _fill_memory_bank(self, input_data):\n \"\"\"Computes and sets the support features for SPADE.\"\"\"\n _ = self.forward_modules.eval()\n\n def _image_to_features(input_image):\n with torch.no_grad():\n input_image = input_image.to(torch.float).to(self.device)\n return self._embed(input_image)\n\n features = []\n with tqdm.tqdm(\n input_data, desc=\"Computing support features...\", position=1, leave=False\n ) as data_iterator:\n for image in data_iterator:\n if isinstance(image, dict):\n image = image[\"image\"]\n features.append(_image_to_features(image))\n\n features = np.concatenate(features, axis=0)\n features = self.featuresampler.run(features)\n\n self.anomaly_scorer.fit(detection_features=[features])\n\n def predict(self, data):\n if isinstance(data, torch.utils.data.DataLoader):\n return self._predict_dataloader(data)\n return self._predict(data)\n\n def _predict_dataloader(self, dataloader):\n \"\"\"This function provides anomaly scores/maps for full dataloaders.\"\"\"", "metadata": {"task_id": "amazon-science--patchcore-inspection/6", "ground_truth": " _ = self.forward_modules.eval()\n\n scores = []\n masks = []\n labels_gt = []\n masks_gt = []\n with tqdm.tqdm(dataloader, desc=\"Inferring...\", leave=False) as data_iterator:\n for image in data_iterator:\n if isinstance(image, dict):\n labels_gt.extend(image[\"is_anomaly\"].numpy().tolist())\n masks_gt.extend(image[\"mask\"].numpy().tolist())\n image = image[\"image\"]\n _scores, _masks = self._predict(image)\n for score, mask in zip(_scores, _masks):\n scores.append(score)\n masks.append(mask)\n return scores, masks, labels_gt, masks_gt\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "patchcore.py"], "context_start_lineno": 0, "lineno": 184, "function_name": "_predict_dataloader"}, "groundtruth": " _ = self.forward_modules.eval()\n\n scores = []\n masks = []\n labels_gt = []\n masks_gt = []\n with tqdm.tqdm(dataloader, desc=\"Inferring...\", leave=False) as data_iterator:\n for image in data_iterator:\n if isinstance(image, dict):\n labels_gt.extend(image[\"is_anomaly\"].numpy().tolist())\n masks_gt.extend(image[\"mask\"].numpy().tolist())\n image = image[\"image\"]\n _scores, _masks = self._predict(image)\n for score, mask in zip(_scores, _masks):\n scores.append(score)\n masks.append(mask)\n return scores, masks, labels_gt, masks_gt\n"} +{"prompt": "\"\"\"PatchCore and PatchCore detection methods.\"\"\"\nimport logging\nimport os\nimport pickle\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport tqdm\n\nimport patchcore\nimport patchcore.backbones\nimport patchcore.common\nimport patchcore.sampler\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass PatchCore(torch.nn.Module):\n def __init__(self, device):\n \"\"\"PatchCore anomaly detection class.\"\"\"\n super(PatchCore, self).__init__()\n self.device = device\n\n def load(\n self,\n backbone,\n layers_to_extract_from,\n device,\n input_shape,\n pretrain_embed_dimension,\n 
target_embed_dimension,\n patchsize=3,\n patchstride=1,\n anomaly_score_num_nn=1,\n featuresampler=patchcore.sampler.IdentitySampler(),\n nn_method=patchcore.common.FaissNN(False, 4),\n **kwargs,\n ):\n self.backbone = backbone.to(device)\n self.layers_to_extract_from = layers_to_extract_from\n self.input_shape = input_shape\n\n self.device = device\n self.patch_maker = PatchMaker(patchsize, stride=patchstride)\n\n self.forward_modules = torch.nn.ModuleDict({})\n\n feature_aggregator = patchcore.common.NetworkFeatureAggregator(\n self.backbone, self.layers_to_extract_from, self.device\n )\n feature_dimensions = feature_aggregator.feature_dimensions(input_shape)\n self.forward_modules[\"feature_aggregator\"] = feature_aggregator\n\n preprocessing = patchcore.common.Preprocessing(\n feature_dimensions, pretrain_embed_dimension\n )\n self.forward_modules[\"preprocessing\"] = preprocessing\n\n self.target_embed_dimension = target_embed_dimension\n preadapt_aggregator = patchcore.common.Aggregator(\n target_dim=target_embed_dimension\n )\n\n _ = preadapt_aggregator.to(self.device)\n\n self.forward_modules[\"preadapt_aggregator\"] = preadapt_aggregator\n\n self.anomaly_scorer = patchcore.common.NearestNeighbourScorer(\n n_nearest_neighbours=anomaly_score_num_nn, nn_method=nn_method\n )\n\n self.anomaly_segmentor = patchcore.common.RescaleSegmentor(\n device=self.device, target_size=input_shape[-2:]\n )\n\n self.featuresampler = featuresampler\n\n def embed(self, data):\n if isinstance(data, torch.utils.data.DataLoader):\n features = []\n for image in data:\n if isinstance(image, dict):\n image = image[\"image\"]\n with torch.no_grad():\n input_image = image.to(torch.float).to(self.device)\n features.append(self._embed(input_image))\n return features\n return self._embed(data)\n\n def _embed(self, images, detach=True, provide_patch_shapes=False):\n \"\"\"Returns feature embeddings for images.\"\"\"\n\n def _detach(features):\n if detach:\n return [x.detach().cpu().numpy() for x in features]\n return features\n\n _ = self.forward_modules[\"feature_aggregator\"].eval()\n with torch.no_grad():\n features = self.forward_modules[\"feature_aggregator\"](images)\n\n features = [features[layer] for layer in self.layers_to_extract_from]\n\n features = [\n self.patch_maker.patchify(x, return_spatial_info=True) for x in features\n ]\n patch_shapes = [x[1] for x in features]\n features = [x[0] for x in features]\n ref_num_patches = patch_shapes[0]\n\n for i in range(1, len(features)):\n _features = features[i]\n patch_dims = patch_shapes[i]\n\n # TODO(pgehler): Add comments\n _features = _features.reshape(\n _features.shape[0], patch_dims[0], patch_dims[1], *_features.shape[2:]\n )\n _features = _features.permute(0, -3, -2, -1, 1, 2)\n perm_base_shape = _features.shape\n _features = _features.reshape(-1, *_features.shape[-2:])\n _features = F.interpolate(\n _features.unsqueeze(1),\n size=(ref_num_patches[0], ref_num_patches[1]),\n mode=\"bilinear\",\n align_corners=False,\n )\n _features = _features.squeeze(1)\n _features = _features.reshape(\n *perm_base_shape[:-2], ref_num_patches[0], ref_num_patches[1]\n )\n _features = _features.permute(0, -2, -1, 1, 2, 3)\n _features = _features.reshape(len(_features), -1, *_features.shape[-3:])\n features[i] = _features\n features = [x.reshape(-1, *x.shape[-3:]) for x in features]\n\n # As different feature backbones & patching provide differently\n # sized features, these are brought into the correct form here.\n features = 
self.forward_modules[\"preprocessing\"](features)\n features = self.forward_modules[\"preadapt_aggregator\"](features)\n\n if provide_patch_shapes:\n return _detach(features), patch_shapes\n return _detach(features)\n\n def fit(self, training_data):\n \"\"\"PatchCore training.\n\n This function computes the embeddings of the training data and fills the\n memory bank of SPADE.\n \"\"\"\n self._fill_memory_bank(training_data)\n\n def _fill_memory_bank(self, input_data):\n \"\"\"Computes and sets the support features for SPADE.\"\"\"\n _ = self.forward_modules.eval()\n\n def _image_to_features(input_image):\n with torch.no_grad():\n input_image = input_image.to(torch.float).to(self.device)\n return self._embed(input_image)\n\n features = []\n with tqdm.tqdm(\n input_data, desc=\"Computing support features...\", position=1, leave=False\n ) as data_iterator:\n for image in data_iterator:\n if isinstance(image, dict):\n image = image[\"image\"]\n features.append(_image_to_features(image))\n\n features = np.concatenate(features, axis=0)\n features = self.featuresampler.run(features)\n\n self.anomaly_scorer.fit(detection_features=[features])\n\n def predict(self, data):\n if isinstance(data, torch.utils.data.DataLoader):\n return self._predict_dataloader(data)\n return self._predict(data)\n\n def _predict_dataloader(self, dataloader):\n \"\"\"This function provides anomaly scores/maps for full dataloaders.\"\"\"\n _ = self.forward_modules.eval()\n\n scores = []\n masks = []\n labels_gt = []\n masks_gt = []\n with tqdm.tqdm(dataloader, desc=\"Inferring...\", leave=False) as data_iterator:\n for image in data_iterator:\n if isinstance(image, dict):\n labels_gt.extend(image[\"is_anomaly\"].numpy().tolist())\n masks_gt.extend(image[\"mask\"].numpy().tolist())\n image = image[\"image\"]\n _scores, _masks = self._predict(image)\n for score, mask in zip(_scores, _masks):\n scores.append(score)\n masks.append(mask)\n return scores, masks, labels_gt, masks_gt\n\n def _predict(self, images):\n \"\"\"Infer score and mask for a batch of images.\"\"\"\n images = images.to(torch.float).to(self.device)\n _ = self.forward_modules.eval()\n\n batchsize = images.shape[0]\n with torch.no_grad():\n features, patch_shapes = self._embed(images, provide_patch_shapes=True)\n features = np.asarray(features)\n\n patch_scores = image_scores = self.anomaly_scorer.predict([features])[0]\n image_scores = self.patch_maker.unpatch_scores(\n image_scores, batchsize=batchsize\n )\n image_scores = image_scores.reshape(*image_scores.shape[:2], -1)\n image_scores = self.patch_maker.score(image_scores)\n\n patch_scores = self.patch_maker.unpatch_scores(\n patch_scores, batchsize=batchsize\n )\n scales = patch_shapes[0]\n patch_scores = patch_scores.reshape(batchsize, scales[0], scales[1])\n\n masks = self.anomaly_segmentor.convert_to_segmentation(patch_scores)\n\n return [score for score in image_scores], [mask for mask in masks]\n\n @staticmethod\n def _params_file(filepath, prepend=\"\"):\n return os.path.join(filepath, prepend + \"patchcore_params.pkl\")\n\n def save_to_path(self, save_path: str, prepend: str = \"\") -> None:", "metadata": {"task_id": "amazon-science--patchcore-inspection/7", "ground_truth": " LOGGER.info(\"Saving PatchCore data.\")\n self.anomaly_scorer.save(\n save_path, save_features_separately=False, prepend=prepend\n )\n patchcore_params = {\n \"backbone.name\": self.backbone.name,\n \"layers_to_extract_from\": self.layers_to_extract_from,\n \"input_shape\": self.input_shape,\n \"pretrain_embed_dimension\": 
self.forward_modules[\n \"preprocessing\"\n ].output_dim,\n \"target_embed_dimension\": self.forward_modules[\n \"preadapt_aggregator\"\n ].target_dim,\n \"patchsize\": self.patch_maker.patchsize,\n \"patchstride\": self.patch_maker.stride,\n \"anomaly_scorer_num_nn\": self.anomaly_scorer.n_nearest_neighbours,\n }\n with open(self._params_file(save_path, prepend), \"wb\") as save_file:\n pickle.dump(patchcore_params, save_file, pickle.HIGHEST_PROTOCOL)\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "patchcore.py"], "context_start_lineno": 0, "lineno": 234, "function_name": "save_to_path"}, "groundtruth": " LOGGER.info(\"Saving PatchCore data.\")\n self.anomaly_scorer.save(\n save_path, save_features_separately=False, prepend=prepend\n )\n patchcore_params = {\n \"backbone.name\": self.backbone.name,\n \"layers_to_extract_from\": self.layers_to_extract_from,\n \"input_shape\": self.input_shape,\n \"pretrain_embed_dimension\": self.forward_modules[\n \"preprocessing\"\n ].output_dim,\n \"target_embed_dimension\": self.forward_modules[\n \"preadapt_aggregator\"\n ].target_dim,\n \"patchsize\": self.patch_maker.patchsize,\n \"patchstride\": self.patch_maker.stride,\n \"anomaly_scorer_num_nn\": self.anomaly_scorer.n_nearest_neighbours,\n }\n with open(self._params_file(save_path, prepend), \"wb\") as save_file:\n pickle.dump(patchcore_params, save_file, pickle.HIGHEST_PROTOCOL)\n"} +{"prompt": "\"\"\"PatchCore and PatchCore detection methods.\"\"\"\nimport logging\nimport os\nimport pickle\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport tqdm\n\nimport patchcore\nimport patchcore.backbones\nimport patchcore.common\nimport patchcore.sampler\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass PatchCore(torch.nn.Module):\n def __init__(self, device):\n \"\"\"PatchCore anomaly detection class.\"\"\"\n super(PatchCore, self).__init__()\n self.device = device\n\n def load(\n self,\n backbone,\n layers_to_extract_from,\n device,\n input_shape,\n pretrain_embed_dimension,\n target_embed_dimension,\n patchsize=3,\n patchstride=1,\n anomaly_score_num_nn=1,\n featuresampler=patchcore.sampler.IdentitySampler(),\n nn_method=patchcore.common.FaissNN(False, 4),\n **kwargs,\n ):\n self.backbone = backbone.to(device)\n self.layers_to_extract_from = layers_to_extract_from\n self.input_shape = input_shape\n\n self.device = device\n self.patch_maker = PatchMaker(patchsize, stride=patchstride)\n\n self.forward_modules = torch.nn.ModuleDict({})\n\n feature_aggregator = patchcore.common.NetworkFeatureAggregator(\n self.backbone, self.layers_to_extract_from, self.device\n )\n feature_dimensions = feature_aggregator.feature_dimensions(input_shape)\n self.forward_modules[\"feature_aggregator\"] = feature_aggregator\n\n preprocessing = patchcore.common.Preprocessing(\n feature_dimensions, pretrain_embed_dimension\n )\n self.forward_modules[\"preprocessing\"] = preprocessing\n\n self.target_embed_dimension = target_embed_dimension\n preadapt_aggregator = patchcore.common.Aggregator(\n target_dim=target_embed_dimension\n )\n\n _ = preadapt_aggregator.to(self.device)\n\n self.forward_modules[\"preadapt_aggregator\"] = preadapt_aggregator\n\n self.anomaly_scorer = patchcore.common.NearestNeighbourScorer(\n n_nearest_neighbours=anomaly_score_num_nn, nn_method=nn_method\n )\n\n self.anomaly_segmentor = patchcore.common.RescaleSegmentor(\n device=self.device, target_size=input_shape[-2:]\n )\n\n self.featuresampler = featuresampler\n\n def embed(self, data):\n if 
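Note on the `save_to_path` ground truth above: only lightweight hyper-parameters round-trip through pickle; the heavyweight memory bank is delegated to the scorer's own save. The persistence pattern in isolation (the directory and params dict are illustrative assumptions):

    import os
    import pickle
    import tempfile

    def params_file(dirpath, prepend=""):
        return os.path.join(dirpath, prepend + "patchcore_params.pkl")

    save_dir = tempfile.mkdtemp()
    params = {"patchsize": 3, "patchstride": 1, "anomaly_scorer_num_nn": 1}
    with open(params_file(save_dir), "wb") as f:
        pickle.dump(params, f, pickle.HIGHEST_PROTOCOL)
    with open(params_file(save_dir), "rb") as f:
        restored = pickle.load(f)
    assert restored == params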
isinstance(data, torch.utils.data.DataLoader):\n features = []\n for image in data:\n if isinstance(image, dict):\n image = image[\"image\"]\n with torch.no_grad():\n input_image = image.to(torch.float).to(self.device)\n features.append(self._embed(input_image))\n return features\n return self._embed(data)\n\n def _embed(self, images, detach=True, provide_patch_shapes=False):\n \"\"\"Returns feature embeddings for images.\"\"\"\n\n def _detach(features):\n if detach:\n return [x.detach().cpu().numpy() for x in features]\n return features\n\n _ = self.forward_modules[\"feature_aggregator\"].eval()\n with torch.no_grad():\n features = self.forward_modules[\"feature_aggregator\"](images)\n\n features = [features[layer] for layer in self.layers_to_extract_from]\n\n features = [\n self.patch_maker.patchify(x, return_spatial_info=True) for x in features\n ]\n patch_shapes = [x[1] for x in features]\n features = [x[0] for x in features]\n ref_num_patches = patch_shapes[0]\n\n for i in range(1, len(features)):\n _features = features[i]\n patch_dims = patch_shapes[i]\n\n # TODO(pgehler): Add comments\n _features = _features.reshape(\n _features.shape[0], patch_dims[0], patch_dims[1], *_features.shape[2:]\n )\n _features = _features.permute(0, -3, -2, -1, 1, 2)\n perm_base_shape = _features.shape\n _features = _features.reshape(-1, *_features.shape[-2:])\n _features = F.interpolate(\n _features.unsqueeze(1),\n size=(ref_num_patches[0], ref_num_patches[1]),\n mode=\"bilinear\",\n align_corners=False,\n )\n _features = _features.squeeze(1)\n _features = _features.reshape(\n *perm_base_shape[:-2], ref_num_patches[0], ref_num_patches[1]\n )\n _features = _features.permute(0, -2, -1, 1, 2, 3)\n _features = _features.reshape(len(_features), -1, *_features.shape[-3:])\n features[i] = _features\n features = [x.reshape(-1, *x.shape[-3:]) for x in features]\n\n # As different feature backbones & patching provide differently\n # sized features, these are brought into the correct form here.\n features = self.forward_modules[\"preprocessing\"](features)\n features = self.forward_modules[\"preadapt_aggregator\"](features)\n\n if provide_patch_shapes:\n return _detach(features), patch_shapes\n return _detach(features)\n\n def fit(self, training_data):\n \"\"\"PatchCore training.\n\n This function computes the embeddings of the training data and fills the\n memory bank of SPADE.\n \"\"\"\n self._fill_memory_bank(training_data)\n\n def _fill_memory_bank(self, input_data):\n \"\"\"Computes and sets the support features for SPADE.\"\"\"\n _ = self.forward_modules.eval()\n\n def _image_to_features(input_image):\n with torch.no_grad():\n input_image = input_image.to(torch.float).to(self.device)\n return self._embed(input_image)\n\n features = []\n with tqdm.tqdm(\n input_data, desc=\"Computing support features...\", position=1, leave=False\n ) as data_iterator:\n for image in data_iterator:\n if isinstance(image, dict):\n image = image[\"image\"]\n features.append(_image_to_features(image))\n\n features = np.concatenate(features, axis=0)\n features = self.featuresampler.run(features)\n\n self.anomaly_scorer.fit(detection_features=[features])\n\n def predict(self, data):\n if isinstance(data, torch.utils.data.DataLoader):\n return self._predict_dataloader(data)\n return self._predict(data)\n\n def _predict_dataloader(self, dataloader):\n \"\"\"This function provides anomaly scores/maps for full dataloaders.\"\"\"\n _ = self.forward_modules.eval()\n\n scores = []\n masks = []\n labels_gt = []\n masks_gt = []\n with 
tqdm.tqdm(dataloader, desc=\"Inferring...\", leave=False) as data_iterator:\n for image in data_iterator:\n if isinstance(image, dict):\n labels_gt.extend(image[\"is_anomaly\"].numpy().tolist())\n masks_gt.extend(image[\"mask\"].numpy().tolist())\n image = image[\"image\"]\n _scores, _masks = self._predict(image)\n for score, mask in zip(_scores, _masks):\n scores.append(score)\n masks.append(mask)\n return scores, masks, labels_gt, masks_gt\n\n def _predict(self, images):\n \"\"\"Infer score and mask for a batch of images.\"\"\"\n images = images.to(torch.float).to(self.device)\n _ = self.forward_modules.eval()\n\n batchsize = images.shape[0]\n with torch.no_grad():\n features, patch_shapes = self._embed(images, provide_patch_shapes=True)\n features = np.asarray(features)\n\n patch_scores = image_scores = self.anomaly_scorer.predict([features])[0]\n image_scores = self.patch_maker.unpatch_scores(\n image_scores, batchsize=batchsize\n )\n image_scores = image_scores.reshape(*image_scores.shape[:2], -1)\n image_scores = self.patch_maker.score(image_scores)\n\n patch_scores = self.patch_maker.unpatch_scores(\n patch_scores, batchsize=batchsize\n )\n scales = patch_shapes[0]\n patch_scores = patch_scores.reshape(batchsize, scales[0], scales[1])\n\n masks = self.anomaly_segmentor.convert_to_segmentation(patch_scores)\n\n return [score for score in image_scores], [mask for mask in masks]\n\n @staticmethod\n def _params_file(filepath, prepend=\"\"):\n return os.path.join(filepath, prepend + \"patchcore_params.pkl\")\n\n def save_to_path(self, save_path: str, prepend: str = \"\") -> None:\n LOGGER.info(\"Saving PatchCore data.\")\n self.anomaly_scorer.save(\n save_path, save_features_separately=False, prepend=prepend\n )\n patchcore_params = {\n \"backbone.name\": self.backbone.name,\n \"layers_to_extract_from\": self.layers_to_extract_from,\n \"input_shape\": self.input_shape,\n \"pretrain_embed_dimension\": self.forward_modules[\n \"preprocessing\"\n ].output_dim,\n \"target_embed_dimension\": self.forward_modules[\n \"preadapt_aggregator\"\n ].target_dim,\n \"patchsize\": self.patch_maker.patchsize,\n \"patchstride\": self.patch_maker.stride,\n \"anomaly_scorer_num_nn\": self.anomaly_scorer.n_nearest_neighbours,\n }\n with open(self._params_file(save_path, prepend), \"wb\") as save_file:\n pickle.dump(patchcore_params, save_file, pickle.HIGHEST_PROTOCOL)\n\n def load_from_path(\n self,\n load_path: str,\n device: torch.device,\n nn_method: patchcore.common.FaissNN(False, 4),\n prepend: str = \"\",\n ) -> None:\n LOGGER.info(\"Loading and initializing PatchCore.\")\n with open(self._params_file(load_path, prepend), \"rb\") as load_file:\n patchcore_params = pickle.load(load_file)\n patchcore_params[\"backbone\"] = patchcore.backbones.load(\n patchcore_params[\"backbone.name\"]\n )\n patchcore_params[\"backbone\"].name = patchcore_params[\"backbone.name\"]\n del patchcore_params[\"backbone.name\"]\n self.load(**patchcore_params, device=device, nn_method=nn_method)\n\n self.anomaly_scorer.load(load_path, prepend)\n\n\n# Image handling classes.\nclass PatchMaker:\n def __init__(self, patchsize, stride=None):\n self.patchsize = patchsize\n self.stride = stride\n\n def patchify(self, features, return_spatial_info=False):\n \"\"\"Convert a tensor into a tensor of respective patches.\n Args:\n x: [torch.Tensor, bs x c x w x h]\n Returns:\n x: [torch.Tensor, bs * w//stride * h//stride, c, patchsize,\n patchsize]\n \"\"\"", "metadata": {"task_id": "amazon-science--patchcore-inspection/8", 
"ground_truth": " padding = int((self.patchsize - 1) / 2)\n unfolder = torch.nn.Unfold(\n kernel_size=self.patchsize, stride=self.stride, padding=padding, dilation=1\n )\n unfolded_features = unfolder(features)\n number_of_total_patches = []\n for s in features.shape[-2:]:\n n_patches = (\n s + 2 * padding - 1 * (self.patchsize - 1) - 1\n ) / self.stride + 1\n number_of_total_patches.append(int(n_patches))\n unfolded_features = unfolded_features.reshape(\n *features.shape[:2], self.patchsize, self.patchsize, -1\n )\n unfolded_features = unfolded_features.permute(0, 4, 1, 2, 3)\n\n if return_spatial_info:\n return unfolded_features, number_of_total_patches\n return unfolded_features\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "patchcore.py"], "context_start_lineno": 0, "lineno": 289, "function_name": "patchify"}, "groundtruth": " padding = int((self.patchsize - 1) / 2)\n unfolder = torch.nn.Unfold(\n kernel_size=self.patchsize, stride=self.stride, padding=padding, dilation=1\n )\n unfolded_features = unfolder(features)\n number_of_total_patches = []\n for s in features.shape[-2:]:\n n_patches = (\n s + 2 * padding - 1 * (self.patchsize - 1) - 1\n ) / self.stride + 1\n number_of_total_patches.append(int(n_patches))\n unfolded_features = unfolded_features.reshape(\n *features.shape[:2], self.patchsize, self.patchsize, -1\n )\n unfolded_features = unfolded_features.permute(0, 4, 1, 2, 3)\n\n if return_spatial_info:\n return unfolded_features, number_of_total_patches\n return unfolded_features\n"} +{"prompt": "\"\"\"PatchCore and PatchCore detection methods.\"\"\"\nimport logging\nimport os\nimport pickle\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport tqdm\n\nimport patchcore\nimport patchcore.backbones\nimport patchcore.common\nimport patchcore.sampler\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass PatchCore(torch.nn.Module):\n def __init__(self, device):\n \"\"\"PatchCore anomaly detection class.\"\"\"\n super(PatchCore, self).__init__()\n self.device = device\n\n def load(\n self,\n backbone,\n layers_to_extract_from,\n device,\n input_shape,\n pretrain_embed_dimension,\n target_embed_dimension,\n patchsize=3,\n patchstride=1,\n anomaly_score_num_nn=1,\n featuresampler=patchcore.sampler.IdentitySampler(),\n nn_method=patchcore.common.FaissNN(False, 4),\n **kwargs,\n ):\n self.backbone = backbone.to(device)\n self.layers_to_extract_from = layers_to_extract_from\n self.input_shape = input_shape\n\n self.device = device\n self.patch_maker = PatchMaker(patchsize, stride=patchstride)\n\n self.forward_modules = torch.nn.ModuleDict({})\n\n feature_aggregator = patchcore.common.NetworkFeatureAggregator(\n self.backbone, self.layers_to_extract_from, self.device\n )\n feature_dimensions = feature_aggregator.feature_dimensions(input_shape)\n self.forward_modules[\"feature_aggregator\"] = feature_aggregator\n\n preprocessing = patchcore.common.Preprocessing(\n feature_dimensions, pretrain_embed_dimension\n )\n self.forward_modules[\"preprocessing\"] = preprocessing\n\n self.target_embed_dimension = target_embed_dimension\n preadapt_aggregator = patchcore.common.Aggregator(\n target_dim=target_embed_dimension\n )\n\n _ = preadapt_aggregator.to(self.device)\n\n self.forward_modules[\"preadapt_aggregator\"] = preadapt_aggregator\n\n self.anomaly_scorer = patchcore.common.NearestNeighbourScorer(\n n_nearest_neighbours=anomaly_score_num_nn, nn_method=nn_method\n )\n\n self.anomaly_segmentor = patchcore.common.RescaleSegmentor(\n 
device=self.device, target_size=input_shape[-2:]\n )\n\n self.featuresampler = featuresampler\n\n def embed(self, data):\n if isinstance(data, torch.utils.data.DataLoader):\n features = []\n for image in data:\n if isinstance(image, dict):\n image = image[\"image\"]\n with torch.no_grad():\n input_image = image.to(torch.float).to(self.device)\n features.append(self._embed(input_image))\n return features\n return self._embed(data)\n\n def _embed(self, images, detach=True, provide_patch_shapes=False):\n \"\"\"Returns feature embeddings for images.\"\"\"\n\n def _detach(features):\n if detach:\n return [x.detach().cpu().numpy() for x in features]\n return features\n\n _ = self.forward_modules[\"feature_aggregator\"].eval()\n with torch.no_grad():\n features = self.forward_modules[\"feature_aggregator\"](images)\n\n features = [features[layer] for layer in self.layers_to_extract_from]\n\n features = [\n self.patch_maker.patchify(x, return_spatial_info=True) for x in features\n ]\n patch_shapes = [x[1] for x in features]\n features = [x[0] for x in features]\n ref_num_patches = patch_shapes[0]\n\n for i in range(1, len(features)):\n _features = features[i]\n patch_dims = patch_shapes[i]\n\n # TODO(pgehler): Add comments\n _features = _features.reshape(\n _features.shape[0], patch_dims[0], patch_dims[1], *_features.shape[2:]\n )\n _features = _features.permute(0, -3, -2, -1, 1, 2)\n perm_base_shape = _features.shape\n _features = _features.reshape(-1, *_features.shape[-2:])\n _features = F.interpolate(\n _features.unsqueeze(1),\n size=(ref_num_patches[0], ref_num_patches[1]),\n mode=\"bilinear\",\n align_corners=False,\n )\n _features = _features.squeeze(1)\n _features = _features.reshape(\n *perm_base_shape[:-2], ref_num_patches[0], ref_num_patches[1]\n )\n _features = _features.permute(0, -2, -1, 1, 2, 3)\n _features = _features.reshape(len(_features), -1, *_features.shape[-3:])\n features[i] = _features\n features = [x.reshape(-1, *x.shape[-3:]) for x in features]\n\n # As different feature backbones & patching provide differently\n # sized features, these are brought into the correct form here.\n features = self.forward_modules[\"preprocessing\"](features)\n features = self.forward_modules[\"preadapt_aggregator\"](features)\n\n if provide_patch_shapes:\n return _detach(features), patch_shapes\n return _detach(features)\n\n def fit(self, training_data):\n \"\"\"PatchCore training.\n\n This function computes the embeddings of the training data and fills the\n memory bank of SPADE.\n \"\"\"\n self._fill_memory_bank(training_data)\n\n def _fill_memory_bank(self, input_data):\n \"\"\"Computes and sets the support features for SPADE.\"\"\"\n _ = self.forward_modules.eval()\n\n def _image_to_features(input_image):\n with torch.no_grad():\n input_image = input_image.to(torch.float).to(self.device)\n return self._embed(input_image)\n\n features = []\n with tqdm.tqdm(\n input_data, desc=\"Computing support features...\", position=1, leave=False\n ) as data_iterator:\n for image in data_iterator:\n if isinstance(image, dict):\n image = image[\"image\"]\n features.append(_image_to_features(image))\n\n features = np.concatenate(features, axis=0)\n features = self.featuresampler.run(features)\n\n self.anomaly_scorer.fit(detection_features=[features])\n\n def predict(self, data):\n if isinstance(data, torch.utils.data.DataLoader):\n return self._predict_dataloader(data)\n return self._predict(data)\n\n def _predict_dataloader(self, dataloader):\n \"\"\"This function provides anomaly scores/maps for full 
dataloaders.\"\"\"\n _ = self.forward_modules.eval()\n\n scores = []\n masks = []\n labels_gt = []\n masks_gt = []\n with tqdm.tqdm(dataloader, desc=\"Inferring...\", leave=False) as data_iterator:\n for image in data_iterator:\n if isinstance(image, dict):\n labels_gt.extend(image[\"is_anomaly\"].numpy().tolist())\n masks_gt.extend(image[\"mask\"].numpy().tolist())\n image = image[\"image\"]\n _scores, _masks = self._predict(image)\n for score, mask in zip(_scores, _masks):\n scores.append(score)\n masks.append(mask)\n return scores, masks, labels_gt, masks_gt\n\n def _predict(self, images):\n \"\"\"Infer score and mask for a batch of images.\"\"\"\n images = images.to(torch.float).to(self.device)\n _ = self.forward_modules.eval()\n\n batchsize = images.shape[0]\n with torch.no_grad():\n features, patch_shapes = self._embed(images, provide_patch_shapes=True)\n features = np.asarray(features)\n\n patch_scores = image_scores = self.anomaly_scorer.predict([features])[0]\n image_scores = self.patch_maker.unpatch_scores(\n image_scores, batchsize=batchsize\n )\n image_scores = image_scores.reshape(*image_scores.shape[:2], -1)\n image_scores = self.patch_maker.score(image_scores)\n\n patch_scores = self.patch_maker.unpatch_scores(\n patch_scores, batchsize=batchsize\n )\n scales = patch_shapes[0]\n patch_scores = patch_scores.reshape(batchsize, scales[0], scales[1])\n\n masks = self.anomaly_segmentor.convert_to_segmentation(patch_scores)\n\n return [score for score in image_scores], [mask for mask in masks]\n\n @staticmethod\n def _params_file(filepath, prepend=\"\"):\n return os.path.join(filepath, prepend + \"patchcore_params.pkl\")\n\n def save_to_path(self, save_path: str, prepend: str = \"\") -> None:\n LOGGER.info(\"Saving PatchCore data.\")\n self.anomaly_scorer.save(\n save_path, save_features_separately=False, prepend=prepend\n )\n patchcore_params = {\n \"backbone.name\": self.backbone.name,\n \"layers_to_extract_from\": self.layers_to_extract_from,\n \"input_shape\": self.input_shape,\n \"pretrain_embed_dimension\": self.forward_modules[\n \"preprocessing\"\n ].output_dim,\n \"target_embed_dimension\": self.forward_modules[\n \"preadapt_aggregator\"\n ].target_dim,\n \"patchsize\": self.patch_maker.patchsize,\n \"patchstride\": self.patch_maker.stride,\n \"anomaly_scorer_num_nn\": self.anomaly_scorer.n_nearest_neighbours,\n }\n with open(self._params_file(save_path, prepend), \"wb\") as save_file:\n pickle.dump(patchcore_params, save_file, pickle.HIGHEST_PROTOCOL)\n\n def load_from_path(\n self,\n load_path: str,\n device: torch.device,\n nn_method: patchcore.common.FaissNN(False, 4),\n prepend: str = \"\",\n ) -> None:\n LOGGER.info(\"Loading and initializing PatchCore.\")\n with open(self._params_file(load_path, prepend), \"rb\") as load_file:\n patchcore_params = pickle.load(load_file)\n patchcore_params[\"backbone\"] = patchcore.backbones.load(\n patchcore_params[\"backbone.name\"]\n )\n patchcore_params[\"backbone\"].name = patchcore_params[\"backbone.name\"]\n del patchcore_params[\"backbone.name\"]\n self.load(**patchcore_params, device=device, nn_method=nn_method)\n\n self.anomaly_scorer.load(load_path, prepend)\n\n\n# Image handling classes.\nclass PatchMaker:\n def __init__(self, patchsize, stride=None):\n self.patchsize = patchsize\n self.stride = stride\n\n def patchify(self, features, return_spatial_info=False):\n \"\"\"Convert a tensor into a tensor of respective patches.\n Args:\n x: [torch.Tensor, bs x c x w x h]\n Returns:\n x: [torch.Tensor, bs * w//stride * 
h//stride, c, patchsize,\n patchsize]\n \"\"\"\n padding = int((self.patchsize - 1) / 2)\n unfolder = torch.nn.Unfold(\n kernel_size=self.patchsize, stride=self.stride, padding=padding, dilation=1\n )\n unfolded_features = unfolder(features)\n number_of_total_patches = []\n for s in features.shape[-2:]:\n n_patches = (\n s + 2 * padding - 1 * (self.patchsize - 1) - 1\n ) / self.stride + 1\n number_of_total_patches.append(int(n_patches))\n unfolded_features = unfolded_features.reshape(\n *features.shape[:2], self.patchsize, self.patchsize, -1\n )\n unfolded_features = unfolded_features.permute(0, 4, 1, 2, 3)\n\n if return_spatial_info:\n return unfolded_features, number_of_total_patches\n return unfolded_features\n\n def unpatch_scores(self, x, batchsize):\n return x.reshape(batchsize, -1, *x.shape[1:])\n\n def score(self, x):", "metadata": {"task_id": "amazon-science--patchcore-inspection/9", "ground_truth": " was_numpy = False\n if isinstance(x, np.ndarray):\n was_numpy = True\n x = torch.from_numpy(x)\n while x.ndim > 1:\n x = torch.max(x, dim=-1).values\n if was_numpy:\n return x.numpy()\n return x\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "patchcore.py"], "context_start_lineno": 0, "lineno": 313, "function_name": "score"}, "groundtruth": " was_numpy = False\n if isinstance(x, np.ndarray):\n was_numpy = True\n x = torch.from_numpy(x)\n while x.ndim > 1:\n x = torch.max(x, dim=-1).values\n if was_numpy:\n return x.numpy()\n return x\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"", "metadata": {"task_id": "amazon-science--patchcore-inspection/10", "ground_truth": " faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 21, "function_name": "__init__"}, "groundtruth": " faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):", "metadata": {"task_id": "amazon-science--patchcore-inspection/11", "ground_truth": " if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n", 
"fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 29, "function_name": "_index_to_gpu"}, "groundtruth": " if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):", "metadata": {"task_id": "amazon-science--patchcore-inspection/12", "ground_truth": " if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 38, "function_name": "_index_to_cpu"}, "groundtruth": " if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):", "metadata": {"task_id": "amazon-science--patchcore-inspection/13", "ground_truth": " if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 43, "function_name": "_create_index"}, "groundtruth": " if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, 
faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: Array of size NxD.\n \"\"\"", "metadata": {"task_id": "amazon-science--patchcore-inspection/14", "ground_truth": " if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 56, "function_name": "fit"}, "groundtruth": " if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n 
Args:\n features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"", "metadata": {"task_id": "amazon-science--patchcore-inspection/15", "ground_truth": " if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 78, "function_name": "run"}, "groundtruth": " if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to 
retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n faiss.write_index(self._index_to_cpu(self.search_index), filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):", "metadata": {"task_id": "amazon-science--patchcore-inspection/16", "ground_truth": " if self.search_index:\n self.search_index.reset()\n self.search_index = None\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 94, "function_name": "reset_index"}, "groundtruth": " if self.search_index:\n self.search_index.reset()\n self.search_index = None\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n 
faiss.write_index(self._index_to_cpu(self.search_index), filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):\n if self.search_index:\n self.search_index.reset()\n self.search_index = None\n\n\nclass ApproximateFaissNN(FaissNN):\n def _train(self, index, features):\n index.train(features)\n\n def _gpu_cloner_options(self):\n cloner = faiss.GpuClonerOptions()\n cloner.useFloat16 = True\n return cloner\n\n def _create_index(self, dimension):", "metadata": {"task_id": "amazon-science--patchcore-inspection/17", "ground_truth": " index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 109, "function_name": "_create_index"}, "groundtruth": " index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, 
n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n faiss.write_index(self._index_to_cpu(self.search_index), filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):\n if self.search_index:\n self.search_index.reset()\n self.search_index = None\n\n\nclass ApproximateFaissNN(FaissNN):\n def _train(self, index, features):\n index.train(features)\n\n def _gpu_cloner_options(self):\n cloner = faiss.GpuClonerOptions()\n cloner.useFloat16 = True\n return cloner\n\n def _create_index(self, dimension):\n index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n\n\nclass _BaseMerger:\n def __init__(self):\n \"\"\"Merges feature embedding by name.\"\"\"\n\n def merge(self, features: list):\n features = [self._reduce(feature) for feature in features]\n return np.concatenate(features, axis=1)\n\n\nclass AverageMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxC", "metadata": {"task_id": "amazon-science--patchcore-inspection/18", "ground_truth": " return features.reshape([features.shape[0], features.shape[1], -1]).mean(\n axis=-1\n )\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 132, "function_name": "_reduce"}, "groundtruth": " return features.reshape([features.shape[0], features.shape[1], -1]).mean(\n axis=-1\n )\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] 
Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n faiss.write_index(self._index_to_cpu(self.search_index), filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):\n if self.search_index:\n self.search_index.reset()\n self.search_index = None\n\n\nclass ApproximateFaissNN(FaissNN):\n def _train(self, index, features):\n index.train(features)\n\n def _gpu_cloner_options(self):\n cloner = faiss.GpuClonerOptions()\n cloner.useFloat16 = True\n return cloner\n\n def _create_index(self, dimension):\n index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n\n\nclass _BaseMerger:\n def __init__(self):\n \"\"\"Merges feature embedding by name.\"\"\"\n\n def merge(self, features: list):\n features = [self._reduce(feature) for feature in features]\n return np.concatenate(features, axis=1)\n\n\nclass AverageMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxC\n return features.reshape([features.shape[0], features.shape[1], -1]).mean(\n axis=-1\n )\n\n\nclass ConcatMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxCWH\n return features.reshape(len(features), -1)\n\n\nclass Preprocessing(torch.nn.Module):\n def __init__(self, input_dims, output_dim):", "metadata": {"task_id": "amazon-science--patchcore-inspection/19", "ground_truth": " super(Preprocessing, self).__init__()\n self.input_dims = input_dims\n self.output_dim = output_dim\n\n self.preprocessing_modules = torch.nn.ModuleList()\n for input_dim in input_dims:\n module = MeanMapper(output_dim)\n self.preprocessing_modules.append(module)\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 146, "function_name": "__init__"}, "groundtruth": " super(Preprocessing, self).__init__()\n self.input_dims = input_dims\n self.output_dim = output_dim\n\n self.preprocessing_modules = torch.nn.ModuleList()\n for input_dim in input_dims:\n module = MeanMapper(output_dim)\n self.preprocessing_modules.append(module)\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, 
index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n faiss.write_index(self._index_to_cpu(self.search_index), filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):\n if self.search_index:\n self.search_index.reset()\n self.search_index = None\n\n\nclass ApproximateFaissNN(FaissNN):\n def _train(self, index, features):\n index.train(features)\n\n def _gpu_cloner_options(self):\n cloner = faiss.GpuClonerOptions()\n cloner.useFloat16 = True\n return cloner\n\n def _create_index(self, dimension):\n index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n\n\nclass _BaseMerger:\n def __init__(self):\n \"\"\"Merges feature embedding by name.\"\"\"\n\n def merge(self, features: list):\n features = [self._reduce(feature) for feature in features]\n return np.concatenate(features, axis=1)\n\n\nclass AverageMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxC\n return features.reshape([features.shape[0], features.shape[1], -1]).mean(\n axis=-1\n )\n\n\nclass ConcatMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxCWH\n return features.reshape(len(features), -1)\n\n\nclass Preprocessing(torch.nn.Module):\n def __init__(self, input_dims, output_dim):\n super(Preprocessing, self).__init__()\n self.input_dims = input_dims\n self.output_dim = output_dim\n\n self.preprocessing_modules = torch.nn.ModuleList()\n for input_dim in input_dims:\n module = MeanMapper(output_dim)\n self.preprocessing_modules.append(module)\n\n def forward(self, features):", "metadata": {"task_id": "amazon-science--patchcore-inspection/20", "ground_truth": " _features = []\n for module, feature in zip(self.preprocessing_modules, features):\n _features.append(module(feature))\n return torch.stack(_features, dim=1)\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 156, 
"function_name": "forward"}, "groundtruth": " _features = []\n for module, feature in zip(self.preprocessing_modules, features):\n _features.append(module(feature))\n return torch.stack(_features, dim=1)\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n faiss.write_index(self._index_to_cpu(self.search_index), filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):\n if self.search_index:\n self.search_index.reset()\n self.search_index = None\n\n\nclass ApproximateFaissNN(FaissNN):\n def _train(self, index, features):\n index.train(features)\n\n def _gpu_cloner_options(self):\n cloner = faiss.GpuClonerOptions()\n cloner.useFloat16 = True\n return cloner\n\n def _create_index(self, dimension):\n index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n\n\nclass _BaseMerger:\n def __init__(self):\n \"\"\"Merges feature embedding by name.\"\"\"\n\n def merge(self, features: list):\n features = [self._reduce(feature) for feature in features]\n 
return np.concatenate(features, axis=1)\n\n\nclass AverageMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxC\n return features.reshape([features.shape[0], features.shape[1], -1]).mean(\n axis=-1\n )\n\n\nclass ConcatMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxCWH\n return features.reshape(len(features), -1)\n\n\nclass Preprocessing(torch.nn.Module):\n def __init__(self, input_dims, output_dim):\n super(Preprocessing, self).__init__()\n self.input_dims = input_dims\n self.output_dim = output_dim\n\n self.preprocessing_modules = torch.nn.ModuleList()\n for input_dim in input_dims:\n module = MeanMapper(output_dim)\n self.preprocessing_modules.append(module)\n\n def forward(self, features):\n _features = []\n for module, feature in zip(self.preprocessing_modules, features):\n _features.append(module(feature))\n return torch.stack(_features, dim=1)\n\n\nclass MeanMapper(torch.nn.Module):\n def __init__(self, preprocessing_dim):\n super(MeanMapper, self).__init__()\n self.preprocessing_dim = preprocessing_dim\n\n def forward(self, features):\n features = features.reshape(len(features), 1, -1)\n return F.adaptive_avg_pool1d(features, self.preprocessing_dim).squeeze(1)\n\n\nclass Aggregator(torch.nn.Module):\n def __init__(self, target_dim):\n super(Aggregator, self).__init__()\n self.target_dim = target_dim\n\n def forward(self, features):\n \"\"\"Returns reshaped and average pooled features.\"\"\"\n # batchsize x number_of_layers x input_dim -> batchsize x target_dim", "metadata": {"task_id": "amazon-science--patchcore-inspection/21", "ground_truth": " features = features.reshape(len(features), 1, -1)\n features = F.adaptive_avg_pool1d(features, self.target_dim)\n return features.reshape(len(features), -1)\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 180, "function_name": "forward"}, "groundtruth": " features = features.reshape(len(features), 1, -1)\n features = F.adaptive_avg_pool1d(features, self.target_dim)\n return features.reshape(len(features), -1)\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n 
features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n faiss.write_index(self._index_to_cpu(self.search_index), filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):\n if self.search_index:\n self.search_index.reset()\n self.search_index = None\n\n\nclass ApproximateFaissNN(FaissNN):\n def _train(self, index, features):\n index.train(features)\n\n def _gpu_cloner_options(self):\n cloner = faiss.GpuClonerOptions()\n cloner.useFloat16 = True\n return cloner\n\n def _create_index(self, dimension):\n index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n\n\nclass _BaseMerger:\n def __init__(self):\n \"\"\"Merges feature embedding by name.\"\"\"\n\n def merge(self, features: list):\n features = [self._reduce(feature) for feature in features]\n return np.concatenate(features, axis=1)\n\n\nclass AverageMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxC\n return features.reshape([features.shape[0], features.shape[1], -1]).mean(\n axis=-1\n )\n\n\nclass ConcatMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxCWH\n return features.reshape(len(features), -1)\n\n\nclass Preprocessing(torch.nn.Module):\n def __init__(self, input_dims, output_dim):\n super(Preprocessing, self).__init__()\n self.input_dims = input_dims\n self.output_dim = output_dim\n\n self.preprocessing_modules = torch.nn.ModuleList()\n for input_dim in input_dims:\n module = MeanMapper(output_dim)\n self.preprocessing_modules.append(module)\n\n def forward(self, features):\n _features = []\n for module, feature in zip(self.preprocessing_modules, features):\n _features.append(module(feature))\n return torch.stack(_features, dim=1)\n\n\nclass MeanMapper(torch.nn.Module):\n def __init__(self, preprocessing_dim):\n super(MeanMapper, self).__init__()\n self.preprocessing_dim = preprocessing_dim\n\n def forward(self, features):\n features = features.reshape(len(features), 1, -1)\n return F.adaptive_avg_pool1d(features, self.preprocessing_dim).squeeze(1)\n\n\nclass Aggregator(torch.nn.Module):\n def __init__(self, target_dim):\n super(Aggregator, self).__init__()\n self.target_dim = target_dim\n\n def forward(self, features):\n \"\"\"Returns reshaped and average pooled features.\"\"\"\n # batchsize x number_of_layers x input_dim -> batchsize x target_dim\n features = features.reshape(len(features), 1, -1)\n features = 
F.adaptive_avg_pool1d(features, self.target_dim)\n return features.reshape(len(features), -1)\n\n\nclass RescaleSegmentor:\n def __init__(self, device, target_size=224):", "metadata": {"task_id": "amazon-science--patchcore-inspection/22", "ground_truth": " self.device = device\n self.target_size = target_size\n self.smoothing = 4\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 187, "function_name": "__init__"}, "groundtruth": " self.device = device\n self.target_size = target_size\n self.smoothing = 4\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n faiss.write_index(self._index_to_cpu(self.search_index), filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):\n if self.search_index:\n self.search_index.reset()\n self.search_index = None\n\n\nclass ApproximateFaissNN(FaissNN):\n def _train(self, index, features):\n index.train(features)\n\n def _gpu_cloner_options(self):\n cloner = faiss.GpuClonerOptions()\n cloner.useFloat16 = True\n return cloner\n\n def _create_index(self, 
dimension):\n index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n\n\nclass _BaseMerger:\n def __init__(self):\n \"\"\"Merges feature embedding by name.\"\"\"\n\n def merge(self, features: list):\n features = [self._reduce(feature) for feature in features]\n return np.concatenate(features, axis=1)\n\n\nclass AverageMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxC\n return features.reshape([features.shape[0], features.shape[1], -1]).mean(\n axis=-1\n )\n\n\nclass ConcatMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxCWH\n return features.reshape(len(features), -1)\n\n\nclass Preprocessing(torch.nn.Module):\n def __init__(self, input_dims, output_dim):\n super(Preprocessing, self).__init__()\n self.input_dims = input_dims\n self.output_dim = output_dim\n\n self.preprocessing_modules = torch.nn.ModuleList()\n for input_dim in input_dims:\n module = MeanMapper(output_dim)\n self.preprocessing_modules.append(module)\n\n def forward(self, features):\n _features = []\n for module, feature in zip(self.preprocessing_modules, features):\n _features.append(module(feature))\n return torch.stack(_features, dim=1)\n\n\nclass MeanMapper(torch.nn.Module):\n def __init__(self, preprocessing_dim):\n super(MeanMapper, self).__init__()\n self.preprocessing_dim = preprocessing_dim\n\n def forward(self, features):\n features = features.reshape(len(features), 1, -1)\n return F.adaptive_avg_pool1d(features, self.preprocessing_dim).squeeze(1)\n\n\nclass Aggregator(torch.nn.Module):\n def __init__(self, target_dim):\n super(Aggregator, self).__init__()\n self.target_dim = target_dim\n\n def forward(self, features):\n \"\"\"Returns reshaped and average pooled features.\"\"\"\n # batchsize x number_of_layers x input_dim -> batchsize x target_dim\n features = features.reshape(len(features), 1, -1)\n features = F.adaptive_avg_pool1d(features, self.target_dim)\n return features.reshape(len(features), -1)\n\n\nclass RescaleSegmentor:\n def __init__(self, device, target_size=224):\n self.device = device\n self.target_size = target_size\n self.smoothing = 4\n\n def convert_to_segmentation(self, patch_scores):", "metadata": {"task_id": "amazon-science--patchcore-inspection/23", "ground_truth": " with torch.no_grad():\n if isinstance(patch_scores, np.ndarray):\n patch_scores = torch.from_numpy(patch_scores)\n _scores = patch_scores.to(self.device)\n _scores = _scores.unsqueeze(1)\n _scores = F.interpolate(\n _scores, size=self.target_size, mode=\"bilinear\", align_corners=False\n )\n _scores = _scores.squeeze(1)\n patch_scores = _scores.cpu().numpy()\n\n return [\n ndimage.gaussian_filter(patch_score, sigma=self.smoothing)\n for patch_score in patch_scores\n ]\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 193, "function_name": "convert_to_segmentation"}, "groundtruth": " with torch.no_grad():\n if isinstance(patch_scores, np.ndarray):\n patch_scores = torch.from_numpy(patch_scores)\n _scores = patch_scores.to(self.device)\n _scores = _scores.unsqueeze(1)\n _scores = F.interpolate(\n _scores, size=self.target_size, mode=\"bilinear\", align_corners=False\n )\n _scores = _scores.squeeze(1)\n patch_scores = _scores.cpu().numpy()\n\n return [\n ndimage.gaussian_filter(patch_score, sigma=self.smoothing)\n for patch_score in patch_scores\n ]\n"} +{"prompt": "import 
copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n faiss.write_index(self._index_to_cpu(self.search_index), filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):\n if self.search_index:\n self.search_index.reset()\n self.search_index = None\n\n\nclass ApproximateFaissNN(FaissNN):\n def _train(self, index, features):\n index.train(features)\n\n def _gpu_cloner_options(self):\n cloner = faiss.GpuClonerOptions()\n cloner.useFloat16 = True\n return cloner\n\n def _create_index(self, dimension):\n index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n\n\nclass _BaseMerger:\n def __init__(self):\n \"\"\"Merges feature embedding by name.\"\"\"\n\n def merge(self, features: list):\n features = [self._reduce(feature) for feature in features]\n return np.concatenate(features, axis=1)\n\n\nclass AverageMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxC\n return features.reshape([features.shape[0], features.shape[1], -1]).mean(\n axis=-1\n 
)\n\n\nclass ConcatMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxCWH\n return features.reshape(len(features), -1)\n\n\nclass Preprocessing(torch.nn.Module):\n def __init__(self, input_dims, output_dim):\n super(Preprocessing, self).__init__()\n self.input_dims = input_dims\n self.output_dim = output_dim\n\n self.preprocessing_modules = torch.nn.ModuleList()\n for input_dim in input_dims:\n module = MeanMapper(output_dim)\n self.preprocessing_modules.append(module)\n\n def forward(self, features):\n _features = []\n for module, feature in zip(self.preprocessing_modules, features):\n _features.append(module(feature))\n return torch.stack(_features, dim=1)\n\n\nclass MeanMapper(torch.nn.Module):\n def __init__(self, preprocessing_dim):\n super(MeanMapper, self).__init__()\n self.preprocessing_dim = preprocessing_dim\n\n def forward(self, features):\n features = features.reshape(len(features), 1, -1)\n return F.adaptive_avg_pool1d(features, self.preprocessing_dim).squeeze(1)\n\n\nclass Aggregator(torch.nn.Module):\n def __init__(self, target_dim):\n super(Aggregator, self).__init__()\n self.target_dim = target_dim\n\n def forward(self, features):\n \"\"\"Returns reshaped and average pooled features.\"\"\"\n # batchsize x number_of_layers x input_dim -> batchsize x target_dim\n features = features.reshape(len(features), 1, -1)\n features = F.adaptive_avg_pool1d(features, self.target_dim)\n return features.reshape(len(features), -1)\n\n\nclass RescaleSegmentor:\n def __init__(self, device, target_size=224):\n self.device = device\n self.target_size = target_size\n self.smoothing = 4\n\n def convert_to_segmentation(self, patch_scores):\n\n with torch.no_grad():\n if isinstance(patch_scores, np.ndarray):\n patch_scores = torch.from_numpy(patch_scores)\n _scores = patch_scores.to(self.device)\n _scores = _scores.unsqueeze(1)\n _scores = F.interpolate(\n _scores, size=self.target_size, mode=\"bilinear\", align_corners=False\n )\n _scores = _scores.squeeze(1)\n patch_scores = _scores.cpu().numpy()\n\n return [\n ndimage.gaussian_filter(patch_score, sigma=self.smoothing)\n for patch_score in patch_scores\n ]\n\n\nclass NetworkFeatureAggregator(torch.nn.Module):\n \"\"\"Efficient extraction of network features.\"\"\"\n\n def __init__(self, backbone, layers_to_extract_from, device):\n super(NetworkFeatureAggregator, self).__init__()\n \"\"\"Extraction of network features.\n\n Runs a network only to the last layer of the list of layers where\n network features should be extracted from.\n\n Args:\n backbone: torchvision.model\n layers_to_extract_from: [list of str]\n \"\"\"\n self.layers_to_extract_from = layers_to_extract_from\n self.backbone = backbone\n self.device = device\n if not hasattr(backbone, \"hook_handles\"):\n self.backbone.hook_handles = []\n for handle in self.backbone.hook_handles:\n handle.remove()\n self.outputs = {}\n\n for extract_layer in layers_to_extract_from:\n forward_hook = ForwardHook(\n self.outputs, extract_layer, layers_to_extract_from[-1]\n )\n if \".\" in extract_layer:\n extract_block, extract_idx = extract_layer.split(\".\")\n network_layer = backbone.__dict__[\"_modules\"][extract_block]\n if extract_idx.isnumeric():\n extract_idx = int(extract_idx)\n network_layer = network_layer[extract_idx]\n else:\n network_layer = network_layer.__dict__[\"_modules\"][extract_idx]\n else:\n network_layer = backbone.__dict__[\"_modules\"][extract_layer]\n\n if isinstance(network_layer, torch.nn.Sequential):\n self.backbone.hook_handles.append(\n 
network_layer[-1].register_forward_hook(forward_hook)\n )\n else:\n self.backbone.hook_handles.append(\n network_layer.register_forward_hook(forward_hook)\n )\n self.to(self.device)\n\n def forward(self, images):", "metadata": {"task_id": "amazon-science--patchcore-inspection/24", "ground_truth": " self.outputs.clear()\n with torch.no_grad():\n # The backbone will throw an Exception once it reached the last\n # layer to compute features from. Computation will stop there.\n try:\n _ = self.backbone(images)\n except LastLayerToExtractReachedException:\n pass\n return self.outputs\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 259, "function_name": "forward"}, "groundtruth": " self.outputs.clear()\n with torch.no_grad():\n # The backbone will throw an Exception once it reached the last\n # layer to compute features from. Computation will stop there.\n try:\n _ = self.backbone(images)\n except LastLayerToExtractReachedException:\n pass\n return self.outputs\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n faiss.write_index(self._index_to_cpu(self.search_index), 
filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):\n if self.search_index:\n self.search_index.reset()\n self.search_index = None\n\n\nclass ApproximateFaissNN(FaissNN):\n def _train(self, index, features):\n index.train(features)\n\n def _gpu_cloner_options(self):\n cloner = faiss.GpuClonerOptions()\n cloner.useFloat16 = True\n return cloner\n\n def _create_index(self, dimension):\n index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n\n\nclass _BaseMerger:\n def __init__(self):\n \"\"\"Merges feature embedding by name.\"\"\"\n\n def merge(self, features: list):\n features = [self._reduce(feature) for feature in features]\n return np.concatenate(features, axis=1)\n\n\nclass AverageMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxC\n return features.reshape([features.shape[0], features.shape[1], -1]).mean(\n axis=-1\n )\n\n\nclass ConcatMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxCWH\n return features.reshape(len(features), -1)\n\n\nclass Preprocessing(torch.nn.Module):\n def __init__(self, input_dims, output_dim):\n super(Preprocessing, self).__init__()\n self.input_dims = input_dims\n self.output_dim = output_dim\n\n self.preprocessing_modules = torch.nn.ModuleList()\n for input_dim in input_dims:\n module = MeanMapper(output_dim)\n self.preprocessing_modules.append(module)\n\n def forward(self, features):\n _features = []\n for module, feature in zip(self.preprocessing_modules, features):\n _features.append(module(feature))\n return torch.stack(_features, dim=1)\n\n\nclass MeanMapper(torch.nn.Module):\n def __init__(self, preprocessing_dim):\n super(MeanMapper, self).__init__()\n self.preprocessing_dim = preprocessing_dim\n\n def forward(self, features):\n features = features.reshape(len(features), 1, -1)\n return F.adaptive_avg_pool1d(features, self.preprocessing_dim).squeeze(1)\n\n\nclass Aggregator(torch.nn.Module):\n def __init__(self, target_dim):\n super(Aggregator, self).__init__()\n self.target_dim = target_dim\n\n def forward(self, features):\n \"\"\"Returns reshaped and average pooled features.\"\"\"\n # batchsize x number_of_layers x input_dim -> batchsize x target_dim\n features = features.reshape(len(features), 1, -1)\n features = F.adaptive_avg_pool1d(features, self.target_dim)\n return features.reshape(len(features), -1)\n\n\nclass RescaleSegmentor:\n def __init__(self, device, target_size=224):\n self.device = device\n self.target_size = target_size\n self.smoothing = 4\n\n def convert_to_segmentation(self, patch_scores):\n\n with torch.no_grad():\n if isinstance(patch_scores, np.ndarray):\n patch_scores = torch.from_numpy(patch_scores)\n _scores = patch_scores.to(self.device)\n _scores = _scores.unsqueeze(1)\n _scores = F.interpolate(\n _scores, size=self.target_size, mode=\"bilinear\", align_corners=False\n )\n _scores = _scores.squeeze(1)\n patch_scores = _scores.cpu().numpy()\n\n return [\n ndimage.gaussian_filter(patch_score, sigma=self.smoothing)\n for patch_score in patch_scores\n ]\n\n\nclass NetworkFeatureAggregator(torch.nn.Module):\n \"\"\"Efficient extraction of network features.\"\"\"\n\n def __init__(self, backbone, layers_to_extract_from, device):\n super(NetworkFeatureAggregator, self).__init__()\n \"\"\"Extraction of network features.\n\n Runs a network only to the last 
layer of the list of layers where\n network features should be extracted from.\n\n Args:\n backbone: torchvision.model\n layers_to_extract_from: [list of str]\n \"\"\"\n self.layers_to_extract_from = layers_to_extract_from\n self.backbone = backbone\n self.device = device\n if not hasattr(backbone, \"hook_handles\"):\n self.backbone.hook_handles = []\n for handle in self.backbone.hook_handles:\n handle.remove()\n self.outputs = {}\n\n for extract_layer in layers_to_extract_from:\n forward_hook = ForwardHook(\n self.outputs, extract_layer, layers_to_extract_from[-1]\n )\n if \".\" in extract_layer:\n extract_block, extract_idx = extract_layer.split(\".\")\n network_layer = backbone.__dict__[\"_modules\"][extract_block]\n if extract_idx.isnumeric():\n extract_idx = int(extract_idx)\n network_layer = network_layer[extract_idx]\n else:\n network_layer = network_layer.__dict__[\"_modules\"][extract_idx]\n else:\n network_layer = backbone.__dict__[\"_modules\"][extract_layer]\n\n if isinstance(network_layer, torch.nn.Sequential):\n self.backbone.hook_handles.append(\n network_layer[-1].register_forward_hook(forward_hook)\n )\n else:\n self.backbone.hook_handles.append(\n network_layer.register_forward_hook(forward_hook)\n )\n self.to(self.device)\n\n def forward(self, images):\n self.outputs.clear()\n with torch.no_grad():\n # The backbone will throw an Exception once it reached the last\n # layer to compute features from. Computation will stop there.\n try:\n _ = self.backbone(images)\n except LastLayerToExtractReachedException:\n pass\n return self.outputs\n\n def feature_dimensions(self, input_shape):\n \"\"\"Computes the feature dimensions for all layers given input_shape.\"\"\"", "metadata": {"task_id": "amazon-science--patchcore-inspection/25", "ground_truth": " _input = torch.ones([1] + list(input_shape)).to(self.device)\n _output = self(_input)\n return [_output[layer].shape[1] for layer in self.layers_to_extract_from]\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 271, "function_name": "feature_dimensions"}, "groundtruth": " _input = torch.ones([1] + list(input_shape)).to(self.device)\n _output = self(_input)\n return [_output[layer].shape[1] for layer in self.layers_to_extract_from]\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, 
faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n faiss.write_index(self._index_to_cpu(self.search_index), filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):\n if self.search_index:\n self.search_index.reset()\n self.search_index = None\n\n\nclass ApproximateFaissNN(FaissNN):\n def _train(self, index, features):\n index.train(features)\n\n def _gpu_cloner_options(self):\n cloner = faiss.GpuClonerOptions()\n cloner.useFloat16 = True\n return cloner\n\n def _create_index(self, dimension):\n index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n\n\nclass _BaseMerger:\n def __init__(self):\n \"\"\"Merges feature embedding by name.\"\"\"\n\n def merge(self, features: list):\n features = [self._reduce(feature) for feature in features]\n return np.concatenate(features, axis=1)\n\n\nclass AverageMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxC\n return features.reshape([features.shape[0], features.shape[1], -1]).mean(\n axis=-1\n )\n\n\nclass ConcatMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxCWH\n return features.reshape(len(features), -1)\n\n\nclass Preprocessing(torch.nn.Module):\n def __init__(self, input_dims, output_dim):\n super(Preprocessing, self).__init__()\n self.input_dims = input_dims\n self.output_dim = output_dim\n\n self.preprocessing_modules = torch.nn.ModuleList()\n for input_dim in input_dims:\n module = MeanMapper(output_dim)\n self.preprocessing_modules.append(module)\n\n def forward(self, features):\n _features = []\n for module, feature in zip(self.preprocessing_modules, features):\n _features.append(module(feature))\n return torch.stack(_features, dim=1)\n\n\nclass MeanMapper(torch.nn.Module):\n def __init__(self, preprocessing_dim):\n super(MeanMapper, self).__init__()\n self.preprocessing_dim = preprocessing_dim\n\n def forward(self, features):\n features = features.reshape(len(features), 1, -1)\n return F.adaptive_avg_pool1d(features, self.preprocessing_dim).squeeze(1)\n\n\nclass Aggregator(torch.nn.Module):\n def __init__(self, target_dim):\n super(Aggregator, self).__init__()\n self.target_dim = target_dim\n\n def forward(self, features):\n \"\"\"Returns 
reshaped and average pooled features.\"\"\"\n # batchsize x number_of_layers x input_dim -> batchsize x target_dim\n features = features.reshape(len(features), 1, -1)\n features = F.adaptive_avg_pool1d(features, self.target_dim)\n return features.reshape(len(features), -1)\n\n\nclass RescaleSegmentor:\n def __init__(self, device, target_size=224):\n self.device = device\n self.target_size = target_size\n self.smoothing = 4\n\n def convert_to_segmentation(self, patch_scores):\n\n with torch.no_grad():\n if isinstance(patch_scores, np.ndarray):\n patch_scores = torch.from_numpy(patch_scores)\n _scores = patch_scores.to(self.device)\n _scores = _scores.unsqueeze(1)\n _scores = F.interpolate(\n _scores, size=self.target_size, mode=\"bilinear\", align_corners=False\n )\n _scores = _scores.squeeze(1)\n patch_scores = _scores.cpu().numpy()\n\n return [\n ndimage.gaussian_filter(patch_score, sigma=self.smoothing)\n for patch_score in patch_scores\n ]\n\n\nclass NetworkFeatureAggregator(torch.nn.Module):\n \"\"\"Efficient extraction of network features.\"\"\"\n\n def __init__(self, backbone, layers_to_extract_from, device):\n super(NetworkFeatureAggregator, self).__init__()\n \"\"\"Extraction of network features.\n\n Runs a network only to the last layer of the list of layers where\n network features should be extracted from.\n\n Args:\n backbone: torchvision.model\n layers_to_extract_from: [list of str]\n \"\"\"\n self.layers_to_extract_from = layers_to_extract_from\n self.backbone = backbone\n self.device = device\n if not hasattr(backbone, \"hook_handles\"):\n self.backbone.hook_handles = []\n for handle in self.backbone.hook_handles:\n handle.remove()\n self.outputs = {}\n\n for extract_layer in layers_to_extract_from:\n forward_hook = ForwardHook(\n self.outputs, extract_layer, layers_to_extract_from[-1]\n )\n if \".\" in extract_layer:\n extract_block, extract_idx = extract_layer.split(\".\")\n network_layer = backbone.__dict__[\"_modules\"][extract_block]\n if extract_idx.isnumeric():\n extract_idx = int(extract_idx)\n network_layer = network_layer[extract_idx]\n else:\n network_layer = network_layer.__dict__[\"_modules\"][extract_idx]\n else:\n network_layer = backbone.__dict__[\"_modules\"][extract_layer]\n\n if isinstance(network_layer, torch.nn.Sequential):\n self.backbone.hook_handles.append(\n network_layer[-1].register_forward_hook(forward_hook)\n )\n else:\n self.backbone.hook_handles.append(\n network_layer.register_forward_hook(forward_hook)\n )\n self.to(self.device)\n\n def forward(self, images):\n self.outputs.clear()\n with torch.no_grad():\n # The backbone will throw an Exception once it reached the last\n # layer to compute features from. 
Computation will stop there.\n try:\n _ = self.backbone(images)\n except LastLayerToExtractReachedException:\n pass\n return self.outputs\n\n def feature_dimensions(self, input_shape):\n \"\"\"Computes the feature dimensions for all layers given input_shape.\"\"\"\n _input = torch.ones([1] + list(input_shape)).to(self.device)\n _output = self(_input)\n return [_output[layer].shape[1] for layer in self.layers_to_extract_from]\n\n\nclass ForwardHook:\n def __init__(self, hook_dict, layer_name: str, last_layer_to_extract: str):", "metadata": {"task_id": "amazon-science--patchcore-inspection/26", "ground_truth": " self.hook_dict = hook_dict\n self.layer_name = layer_name\n self.raise_exception_to_break = copy.deepcopy(\n layer_name == last_layer_to_extract\n )\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 278, "function_name": "__init__"}, "groundtruth": " self.hook_dict = hook_dict\n self.layer_name = layer_name\n self.raise_exception_to_break = copy.deepcopy(\n layer_name == last_layer_to_extract\n )\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n 
faiss.write_index(self._index_to_cpu(self.search_index), filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):\n if self.search_index:\n self.search_index.reset()\n self.search_index = None\n\n\nclass ApproximateFaissNN(FaissNN):\n def _train(self, index, features):\n index.train(features)\n\n def _gpu_cloner_options(self):\n cloner = faiss.GpuClonerOptions()\n cloner.useFloat16 = True\n return cloner\n\n def _create_index(self, dimension):\n index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n\n\nclass _BaseMerger:\n def __init__(self):\n \"\"\"Merges feature embedding by name.\"\"\"\n\n def merge(self, features: list):\n features = [self._reduce(feature) for feature in features]\n return np.concatenate(features, axis=1)\n\n\nclass AverageMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxC\n return features.reshape([features.shape[0], features.shape[1], -1]).mean(\n axis=-1\n )\n\n\nclass ConcatMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxCWH\n return features.reshape(len(features), -1)\n\n\nclass Preprocessing(torch.nn.Module):\n def __init__(self, input_dims, output_dim):\n super(Preprocessing, self).__init__()\n self.input_dims = input_dims\n self.output_dim = output_dim\n\n self.preprocessing_modules = torch.nn.ModuleList()\n for input_dim in input_dims:\n module = MeanMapper(output_dim)\n self.preprocessing_modules.append(module)\n\n def forward(self, features):\n _features = []\n for module, feature in zip(self.preprocessing_modules, features):\n _features.append(module(feature))\n return torch.stack(_features, dim=1)\n\n\nclass MeanMapper(torch.nn.Module):\n def __init__(self, preprocessing_dim):\n super(MeanMapper, self).__init__()\n self.preprocessing_dim = preprocessing_dim\n\n def forward(self, features):\n features = features.reshape(len(features), 1, -1)\n return F.adaptive_avg_pool1d(features, self.preprocessing_dim).squeeze(1)\n\n\nclass Aggregator(torch.nn.Module):\n def __init__(self, target_dim):\n super(Aggregator, self).__init__()\n self.target_dim = target_dim\n\n def forward(self, features):\n \"\"\"Returns reshaped and average pooled features.\"\"\"\n # batchsize x number_of_layers x input_dim -> batchsize x target_dim\n features = features.reshape(len(features), 1, -1)\n features = F.adaptive_avg_pool1d(features, self.target_dim)\n return features.reshape(len(features), -1)\n\n\nclass RescaleSegmentor:\n def __init__(self, device, target_size=224):\n self.device = device\n self.target_size = target_size\n self.smoothing = 4\n\n def convert_to_segmentation(self, patch_scores):\n\n with torch.no_grad():\n if isinstance(patch_scores, np.ndarray):\n patch_scores = torch.from_numpy(patch_scores)\n _scores = patch_scores.to(self.device)\n _scores = _scores.unsqueeze(1)\n _scores = F.interpolate(\n _scores, size=self.target_size, mode=\"bilinear\", align_corners=False\n )\n _scores = _scores.squeeze(1)\n patch_scores = _scores.cpu().numpy()\n\n return [\n ndimage.gaussian_filter(patch_score, sigma=self.smoothing)\n for patch_score in patch_scores\n ]\n\n\nclass NetworkFeatureAggregator(torch.nn.Module):\n \"\"\"Efficient extraction of network features.\"\"\"\n\n def __init__(self, backbone, layers_to_extract_from, device):\n super(NetworkFeatureAggregator, self).__init__()\n \"\"\"Extraction of 
network features.\n\n Runs a network only to the last layer of the list of layers where\n network features should be extracted from.\n\n Args:\n backbone: torchvision.model\n layers_to_extract_from: [list of str]\n \"\"\"\n self.layers_to_extract_from = layers_to_extract_from\n self.backbone = backbone\n self.device = device\n if not hasattr(backbone, \"hook_handles\"):\n self.backbone.hook_handles = []\n for handle in self.backbone.hook_handles:\n handle.remove()\n self.outputs = {}\n\n for extract_layer in layers_to_extract_from:\n forward_hook = ForwardHook(\n self.outputs, extract_layer, layers_to_extract_from[-1]\n )\n if \".\" in extract_layer:\n extract_block, extract_idx = extract_layer.split(\".\")\n network_layer = backbone.__dict__[\"_modules\"][extract_block]\n if extract_idx.isnumeric():\n extract_idx = int(extract_idx)\n network_layer = network_layer[extract_idx]\n else:\n network_layer = network_layer.__dict__[\"_modules\"][extract_idx]\n else:\n network_layer = backbone.__dict__[\"_modules\"][extract_layer]\n\n if isinstance(network_layer, torch.nn.Sequential):\n self.backbone.hook_handles.append(\n network_layer[-1].register_forward_hook(forward_hook)\n )\n else:\n self.backbone.hook_handles.append(\n network_layer.register_forward_hook(forward_hook)\n )\n self.to(self.device)\n\n def forward(self, images):\n self.outputs.clear()\n with torch.no_grad():\n # The backbone will throw an Exception once it reached the last\n # layer to compute features from. Computation will stop there.\n try:\n _ = self.backbone(images)\n except LastLayerToExtractReachedException:\n pass\n return self.outputs\n\n def feature_dimensions(self, input_shape):\n \"\"\"Computes the feature dimensions for all layers given input_shape.\"\"\"\n _input = torch.ones([1] + list(input_shape)).to(self.device)\n _output = self(_input)\n return [_output[layer].shape[1] for layer in self.layers_to_extract_from]\n\n\nclass ForwardHook:\n def __init__(self, hook_dict, layer_name: str, last_layer_to_extract: str):\n self.hook_dict = hook_dict\n self.layer_name = layer_name\n self.raise_exception_to_break = copy.deepcopy(\n layer_name == last_layer_to_extract\n )\n\n def __call__(self, module, input, output):", "metadata": {"task_id": "amazon-science--patchcore-inspection/27", "ground_truth": " self.hook_dict[self.layer_name] = output\n if self.raise_exception_to_break:\n raise LastLayerToExtractReachedException()\n return None\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 285, "function_name": "__call__"}, "groundtruth": " self.hook_dict[self.layer_name] = output\n if self.raise_exception_to_break:\n raise LastLayerToExtractReachedException()\n return None\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there 
is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n faiss.write_index(self._index_to_cpu(self.search_index), filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):\n if self.search_index:\n self.search_index.reset()\n self.search_index = None\n\n\nclass ApproximateFaissNN(FaissNN):\n def _train(self, index, features):\n index.train(features)\n\n def _gpu_cloner_options(self):\n cloner = faiss.GpuClonerOptions()\n cloner.useFloat16 = True\n return cloner\n\n def _create_index(self, dimension):\n index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n\n\nclass _BaseMerger:\n def __init__(self):\n \"\"\"Merges feature embedding by name.\"\"\"\n\n def merge(self, features: list):\n features = [self._reduce(feature) for feature in features]\n return np.concatenate(features, axis=1)\n\n\nclass AverageMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxC\n return features.reshape([features.shape[0], features.shape[1], -1]).mean(\n axis=-1\n )\n\n\nclass ConcatMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxCWH\n return features.reshape(len(features), -1)\n\n\nclass Preprocessing(torch.nn.Module):\n def __init__(self, input_dims, output_dim):\n super(Preprocessing, self).__init__()\n self.input_dims = input_dims\n self.output_dim = output_dim\n\n self.preprocessing_modules = torch.nn.ModuleList()\n for input_dim in input_dims:\n module = MeanMapper(output_dim)\n self.preprocessing_modules.append(module)\n\n def forward(self, features):\n _features = []\n for module, feature in zip(self.preprocessing_modules, features):\n _features.append(module(feature))\n return torch.stack(_features, dim=1)\n\n\nclass MeanMapper(torch.nn.Module):\n def __init__(self, 
preprocessing_dim):\n super(MeanMapper, self).__init__()\n self.preprocessing_dim = preprocessing_dim\n\n def forward(self, features):\n features = features.reshape(len(features), 1, -1)\n return F.adaptive_avg_pool1d(features, self.preprocessing_dim).squeeze(1)\n\n\nclass Aggregator(torch.nn.Module):\n def __init__(self, target_dim):\n super(Aggregator, self).__init__()\n self.target_dim = target_dim\n\n def forward(self, features):\n \"\"\"Returns reshaped and average pooled features.\"\"\"\n # batchsize x number_of_layers x input_dim -> batchsize x target_dim\n features = features.reshape(len(features), 1, -1)\n features = F.adaptive_avg_pool1d(features, self.target_dim)\n return features.reshape(len(features), -1)\n\n\nclass RescaleSegmentor:\n def __init__(self, device, target_size=224):\n self.device = device\n self.target_size = target_size\n self.smoothing = 4\n\n def convert_to_segmentation(self, patch_scores):\n\n with torch.no_grad():\n if isinstance(patch_scores, np.ndarray):\n patch_scores = torch.from_numpy(patch_scores)\n _scores = patch_scores.to(self.device)\n _scores = _scores.unsqueeze(1)\n _scores = F.interpolate(\n _scores, size=self.target_size, mode=\"bilinear\", align_corners=False\n )\n _scores = _scores.squeeze(1)\n patch_scores = _scores.cpu().numpy()\n\n return [\n ndimage.gaussian_filter(patch_score, sigma=self.smoothing)\n for patch_score in patch_scores\n ]\n\n\nclass NetworkFeatureAggregator(torch.nn.Module):\n \"\"\"Efficient extraction of network features.\"\"\"\n\n def __init__(self, backbone, layers_to_extract_from, device):\n super(NetworkFeatureAggregator, self).__init__()\n \"\"\"Extraction of network features.\n\n Runs a network only to the last layer of the list of layers where\n network features should be extracted from.\n\n Args:\n backbone: torchvision.model\n layers_to_extract_from: [list of str]\n \"\"\"\n self.layers_to_extract_from = layers_to_extract_from\n self.backbone = backbone\n self.device = device\n if not hasattr(backbone, \"hook_handles\"):\n self.backbone.hook_handles = []\n for handle in self.backbone.hook_handles:\n handle.remove()\n self.outputs = {}\n\n for extract_layer in layers_to_extract_from:\n forward_hook = ForwardHook(\n self.outputs, extract_layer, layers_to_extract_from[-1]\n )\n if \".\" in extract_layer:\n extract_block, extract_idx = extract_layer.split(\".\")\n network_layer = backbone.__dict__[\"_modules\"][extract_block]\n if extract_idx.isnumeric():\n extract_idx = int(extract_idx)\n network_layer = network_layer[extract_idx]\n else:\n network_layer = network_layer.__dict__[\"_modules\"][extract_idx]\n else:\n network_layer = backbone.__dict__[\"_modules\"][extract_layer]\n\n if isinstance(network_layer, torch.nn.Sequential):\n self.backbone.hook_handles.append(\n network_layer[-1].register_forward_hook(forward_hook)\n )\n else:\n self.backbone.hook_handles.append(\n network_layer.register_forward_hook(forward_hook)\n )\n self.to(self.device)\n\n def forward(self, images):\n self.outputs.clear()\n with torch.no_grad():\n # The backbone will throw an Exception once it reached the last\n # layer to compute features from. 
Computation will stop there.\n try:\n _ = self.backbone(images)\n except LastLayerToExtractReachedException:\n pass\n return self.outputs\n\n def feature_dimensions(self, input_shape):\n \"\"\"Computes the feature dimensions for all layers given input_shape.\"\"\"\n _input = torch.ones([1] + list(input_shape)).to(self.device)\n _output = self(_input)\n return [_output[layer].shape[1] for layer in self.layers_to_extract_from]\n\n\nclass ForwardHook:\n def __init__(self, hook_dict, layer_name: str, last_layer_to_extract: str):\n self.hook_dict = hook_dict\n self.layer_name = layer_name\n self.raise_exception_to_break = copy.deepcopy(\n layer_name == last_layer_to_extract\n )\n\n def __call__(self, module, input, output):\n self.hook_dict[self.layer_name] = output\n if self.raise_exception_to_break:\n raise LastLayerToExtractReachedException()\n return None\n\n\nclass LastLayerToExtractReachedException(Exception):\n pass\n\n\nclass NearestNeighbourScorer(object):\n def __init__(self, n_nearest_neighbours: int, nn_method=FaissNN(False, 4)) -> None:\n \"\"\"\n Neearest-Neighbourhood Anomaly Scorer class.\n\n Args:\n n_nearest_neighbours: [int] Number of nearest neighbours used to\n determine anomalous pixels.\n nn_method: Nearest neighbour search method.\n \"\"\"", "metadata": {"task_id": "amazon-science--patchcore-inspection/28", "ground_truth": " self.feature_merger = ConcatMerger()\n\n self.n_nearest_neighbours = n_nearest_neighbours\n self.nn_method = nn_method\n\n self.imagelevel_nn = lambda query: self.nn_method.run(\n n_nearest_neighbours, query\n )\n self.pixelwise_nn = lambda query, index: self.nn_method.run(1, query, index)\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 305, "function_name": "__init__"}, "groundtruth": " self.feature_merger = ConcatMerger()\n\n self.n_nearest_neighbours = n_nearest_neighbours\n self.nn_method = nn_method\n\n self.imagelevel_nn = lambda query: self.nn_method.run(\n n_nearest_neighbours, query\n )\n self.pixelwise_nn = lambda query, index: self.nn_method.run(1, query, index)\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: 
Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n faiss.write_index(self._index_to_cpu(self.search_index), filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):\n if self.search_index:\n self.search_index.reset()\n self.search_index = None\n\n\nclass ApproximateFaissNN(FaissNN):\n def _train(self, index, features):\n index.train(features)\n\n def _gpu_cloner_options(self):\n cloner = faiss.GpuClonerOptions()\n cloner.useFloat16 = True\n return cloner\n\n def _create_index(self, dimension):\n index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n\n\nclass _BaseMerger:\n def __init__(self):\n \"\"\"Merges feature embedding by name.\"\"\"\n\n def merge(self, features: list):\n features = [self._reduce(feature) for feature in features]\n return np.concatenate(features, axis=1)\n\n\nclass AverageMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxC\n return features.reshape([features.shape[0], features.shape[1], -1]).mean(\n axis=-1\n )\n\n\nclass ConcatMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxCWH\n return features.reshape(len(features), -1)\n\n\nclass Preprocessing(torch.nn.Module):\n def __init__(self, input_dims, output_dim):\n super(Preprocessing, self).__init__()\n self.input_dims = input_dims\n self.output_dim = output_dim\n\n self.preprocessing_modules = torch.nn.ModuleList()\n for input_dim in input_dims:\n module = MeanMapper(output_dim)\n self.preprocessing_modules.append(module)\n\n def forward(self, features):\n _features = []\n for module, feature in zip(self.preprocessing_modules, features):\n _features.append(module(feature))\n return torch.stack(_features, dim=1)\n\n\nclass MeanMapper(torch.nn.Module):\n def __init__(self, preprocessing_dim):\n super(MeanMapper, self).__init__()\n self.preprocessing_dim = preprocessing_dim\n\n def forward(self, features):\n features = features.reshape(len(features), 1, -1)\n return F.adaptive_avg_pool1d(features, self.preprocessing_dim).squeeze(1)\n\n\nclass Aggregator(torch.nn.Module):\n def __init__(self, target_dim):\n super(Aggregator, self).__init__()\n self.target_dim = target_dim\n\n def forward(self, features):\n \"\"\"Returns reshaped and average pooled features.\"\"\"\n # batchsize x number_of_layers x input_dim -> batchsize x target_dim\n features = features.reshape(len(features), 1, -1)\n features = 
F.adaptive_avg_pool1d(features, self.target_dim)\n return features.reshape(len(features), -1)\n\n\nclass RescaleSegmentor:\n def __init__(self, device, target_size=224):\n self.device = device\n self.target_size = target_size\n self.smoothing = 4\n\n def convert_to_segmentation(self, patch_scores):\n\n with torch.no_grad():\n if isinstance(patch_scores, np.ndarray):\n patch_scores = torch.from_numpy(patch_scores)\n _scores = patch_scores.to(self.device)\n _scores = _scores.unsqueeze(1)\n _scores = F.interpolate(\n _scores, size=self.target_size, mode=\"bilinear\", align_corners=False\n )\n _scores = _scores.squeeze(1)\n patch_scores = _scores.cpu().numpy()\n\n return [\n ndimage.gaussian_filter(patch_score, sigma=self.smoothing)\n for patch_score in patch_scores\n ]\n\n\nclass NetworkFeatureAggregator(torch.nn.Module):\n \"\"\"Efficient extraction of network features.\"\"\"\n\n def __init__(self, backbone, layers_to_extract_from, device):\n super(NetworkFeatureAggregator, self).__init__()\n \"\"\"Extraction of network features.\n\n Runs a network only to the last layer of the list of layers where\n network features should be extracted from.\n\n Args:\n backbone: torchvision.model\n layers_to_extract_from: [list of str]\n \"\"\"\n self.layers_to_extract_from = layers_to_extract_from\n self.backbone = backbone\n self.device = device\n if not hasattr(backbone, \"hook_handles\"):\n self.backbone.hook_handles = []\n for handle in self.backbone.hook_handles:\n handle.remove()\n self.outputs = {}\n\n for extract_layer in layers_to_extract_from:\n forward_hook = ForwardHook(\n self.outputs, extract_layer, layers_to_extract_from[-1]\n )\n if \".\" in extract_layer:\n extract_block, extract_idx = extract_layer.split(\".\")\n network_layer = backbone.__dict__[\"_modules\"][extract_block]\n if extract_idx.isnumeric():\n extract_idx = int(extract_idx)\n network_layer = network_layer[extract_idx]\n else:\n network_layer = network_layer.__dict__[\"_modules\"][extract_idx]\n else:\n network_layer = backbone.__dict__[\"_modules\"][extract_layer]\n\n if isinstance(network_layer, torch.nn.Sequential):\n self.backbone.hook_handles.append(\n network_layer[-1].register_forward_hook(forward_hook)\n )\n else:\n self.backbone.hook_handles.append(\n network_layer.register_forward_hook(forward_hook)\n )\n self.to(self.device)\n\n def forward(self, images):\n self.outputs.clear()\n with torch.no_grad():\n # The backbone will throw an Exception once it reached the last\n # layer to compute features from. 
Computation will stop there.\n try:\n _ = self.backbone(images)\n except LastLayerToExtractReachedException:\n pass\n return self.outputs\n\n def feature_dimensions(self, input_shape):\n \"\"\"Computes the feature dimensions for all layers given input_shape.\"\"\"\n _input = torch.ones([1] + list(input_shape)).to(self.device)\n _output = self(_input)\n return [_output[layer].shape[1] for layer in self.layers_to_extract_from]\n\n\nclass ForwardHook:\n def __init__(self, hook_dict, layer_name: str, last_layer_to_extract: str):\n self.hook_dict = hook_dict\n self.layer_name = layer_name\n self.raise_exception_to_break = copy.deepcopy(\n layer_name == last_layer_to_extract\n )\n\n def __call__(self, module, input, output):\n self.hook_dict[self.layer_name] = output\n if self.raise_exception_to_break:\n raise LastLayerToExtractReachedException()\n return None\n\n\nclass LastLayerToExtractReachedException(Exception):\n pass\n\n\nclass NearestNeighbourScorer(object):\n def __init__(self, n_nearest_neighbours: int, nn_method=FaissNN(False, 4)) -> None:\n \"\"\"\n Neearest-Neighbourhood Anomaly Scorer class.\n\n Args:\n n_nearest_neighbours: [int] Number of nearest neighbours used to\n determine anomalous pixels.\n nn_method: Nearest neighbour search method.\n \"\"\"\n self.feature_merger = ConcatMerger()\n\n self.n_nearest_neighbours = n_nearest_neighbours\n self.nn_method = nn_method\n\n self.imagelevel_nn = lambda query: self.nn_method.run(\n n_nearest_neighbours, query\n )\n self.pixelwise_nn = lambda query, index: self.nn_method.run(1, query, index)\n\n def fit(self, detection_features: List[np.ndarray]) -> None:\n \"\"\"Calls the fit function of the nearest neighbour method.\n\n Args:\n detection_features: [list of np.arrays]\n [[bs x d_i] for i in n] Contains a list of\n np.arrays for all training images corresponding to respective\n features VECTORS (or maps, but will be resized) produced by\n some backbone network which should be used for image-level\n anomaly detection.\n \"\"\"", "metadata": {"task_id": "amazon-science--patchcore-inspection/29", "ground_truth": " self.detection_features = self.feature_merger.merge(\n detection_features,\n )\n self.nn_method.fit(self.detection_features)\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 326, "function_name": "fit"}, "groundtruth": " self.detection_features = self.feature_merger.merge(\n detection_features,\n )\n self.nn_method.fit(self.detection_features)\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, 
index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n faiss.write_index(self._index_to_cpu(self.search_index), filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):\n if self.search_index:\n self.search_index.reset()\n self.search_index = None\n\n\nclass ApproximateFaissNN(FaissNN):\n def _train(self, index, features):\n index.train(features)\n\n def _gpu_cloner_options(self):\n cloner = faiss.GpuClonerOptions()\n cloner.useFloat16 = True\n return cloner\n\n def _create_index(self, dimension):\n index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n\n\nclass _BaseMerger:\n def __init__(self):\n \"\"\"Merges feature embedding by name.\"\"\"\n\n def merge(self, features: list):\n features = [self._reduce(feature) for feature in features]\n return np.concatenate(features, axis=1)\n\n\nclass AverageMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxC\n return features.reshape([features.shape[0], features.shape[1], -1]).mean(\n axis=-1\n )\n\n\nclass ConcatMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxCWH\n return features.reshape(len(features), -1)\n\n\nclass Preprocessing(torch.nn.Module):\n def __init__(self, input_dims, output_dim):\n super(Preprocessing, self).__init__()\n self.input_dims = input_dims\n self.output_dim = output_dim\n\n self.preprocessing_modules = torch.nn.ModuleList()\n for input_dim in input_dims:\n module = MeanMapper(output_dim)\n self.preprocessing_modules.append(module)\n\n def forward(self, features):\n _features = []\n for module, feature in zip(self.preprocessing_modules, features):\n _features.append(module(feature))\n return torch.stack(_features, dim=1)\n\n\nclass MeanMapper(torch.nn.Module):\n def __init__(self, preprocessing_dim):\n super(MeanMapper, self).__init__()\n self.preprocessing_dim = preprocessing_dim\n\n def forward(self, features):\n features = features.reshape(len(features), 1, -1)\n return F.adaptive_avg_pool1d(features, 
self.preprocessing_dim).squeeze(1)\n\n\nclass Aggregator(torch.nn.Module):\n def __init__(self, target_dim):\n super(Aggregator, self).__init__()\n self.target_dim = target_dim\n\n def forward(self, features):\n \"\"\"Returns reshaped and average pooled features.\"\"\"\n # batchsize x number_of_layers x input_dim -> batchsize x target_dim\n features = features.reshape(len(features), 1, -1)\n features = F.adaptive_avg_pool1d(features, self.target_dim)\n return features.reshape(len(features), -1)\n\n\nclass RescaleSegmentor:\n def __init__(self, device, target_size=224):\n self.device = device\n self.target_size = target_size\n self.smoothing = 4\n\n def convert_to_segmentation(self, patch_scores):\n\n with torch.no_grad():\n if isinstance(patch_scores, np.ndarray):\n patch_scores = torch.from_numpy(patch_scores)\n _scores = patch_scores.to(self.device)\n _scores = _scores.unsqueeze(1)\n _scores = F.interpolate(\n _scores, size=self.target_size, mode=\"bilinear\", align_corners=False\n )\n _scores = _scores.squeeze(1)\n patch_scores = _scores.cpu().numpy()\n\n return [\n ndimage.gaussian_filter(patch_score, sigma=self.smoothing)\n for patch_score in patch_scores\n ]\n\n\nclass NetworkFeatureAggregator(torch.nn.Module):\n \"\"\"Efficient extraction of network features.\"\"\"\n\n def __init__(self, backbone, layers_to_extract_from, device):\n super(NetworkFeatureAggregator, self).__init__()\n \"\"\"Extraction of network features.\n\n Runs a network only to the last layer of the list of layers where\n network features should be extracted from.\n\n Args:\n backbone: torchvision.model\n layers_to_extract_from: [list of str]\n \"\"\"\n self.layers_to_extract_from = layers_to_extract_from\n self.backbone = backbone\n self.device = device\n if not hasattr(backbone, \"hook_handles\"):\n self.backbone.hook_handles = []\n for handle in self.backbone.hook_handles:\n handle.remove()\n self.outputs = {}\n\n for extract_layer in layers_to_extract_from:\n forward_hook = ForwardHook(\n self.outputs, extract_layer, layers_to_extract_from[-1]\n )\n if \".\" in extract_layer:\n extract_block, extract_idx = extract_layer.split(\".\")\n network_layer = backbone.__dict__[\"_modules\"][extract_block]\n if extract_idx.isnumeric():\n extract_idx = int(extract_idx)\n network_layer = network_layer[extract_idx]\n else:\n network_layer = network_layer.__dict__[\"_modules\"][extract_idx]\n else:\n network_layer = backbone.__dict__[\"_modules\"][extract_layer]\n\n if isinstance(network_layer, torch.nn.Sequential):\n self.backbone.hook_handles.append(\n network_layer[-1].register_forward_hook(forward_hook)\n )\n else:\n self.backbone.hook_handles.append(\n network_layer.register_forward_hook(forward_hook)\n )\n self.to(self.device)\n\n def forward(self, images):\n self.outputs.clear()\n with torch.no_grad():\n # The backbone will throw an Exception once it reached the last\n # layer to compute features from. 
Computation will stop there.\n try:\n _ = self.backbone(images)\n except LastLayerToExtractReachedException:\n pass\n return self.outputs\n\n def feature_dimensions(self, input_shape):\n \"\"\"Computes the feature dimensions for all layers given input_shape.\"\"\"\n _input = torch.ones([1] + list(input_shape)).to(self.device)\n _output = self(_input)\n return [_output[layer].shape[1] for layer in self.layers_to_extract_from]\n\n\nclass ForwardHook:\n def __init__(self, hook_dict, layer_name: str, last_layer_to_extract: str):\n self.hook_dict = hook_dict\n self.layer_name = layer_name\n self.raise_exception_to_break = copy.deepcopy(\n layer_name == last_layer_to_extract\n )\n\n def __call__(self, module, input, output):\n self.hook_dict[self.layer_name] = output\n if self.raise_exception_to_break:\n raise LastLayerToExtractReachedException()\n return None\n\n\nclass LastLayerToExtractReachedException(Exception):\n pass\n\n\nclass NearestNeighbourScorer(object):\n def __init__(self, n_nearest_neighbours: int, nn_method=FaissNN(False, 4)) -> None:\n \"\"\"\n Neearest-Neighbourhood Anomaly Scorer class.\n\n Args:\n n_nearest_neighbours: [int] Number of nearest neighbours used to\n determine anomalous pixels.\n nn_method: Nearest neighbour search method.\n \"\"\"\n self.feature_merger = ConcatMerger()\n\n self.n_nearest_neighbours = n_nearest_neighbours\n self.nn_method = nn_method\n\n self.imagelevel_nn = lambda query: self.nn_method.run(\n n_nearest_neighbours, query\n )\n self.pixelwise_nn = lambda query, index: self.nn_method.run(1, query, index)\n\n def fit(self, detection_features: List[np.ndarray]) -> None:\n \"\"\"Calls the fit function of the nearest neighbour method.\n\n Args:\n detection_features: [list of np.arrays]\n [[bs x d_i] for i in n] Contains a list of\n np.arrays for all training images corresponding to respective\n features VECTORS (or maps, but will be resized) produced by\n some backbone network which should be used for image-level\n anomaly detection.\n \"\"\"\n self.detection_features = self.feature_merger.merge(\n detection_features,\n )\n self.nn_method.fit(self.detection_features)\n\n def predict(\n self, query_features: List[np.ndarray]\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"Predicts anomaly score.\n\n Searches for nearest neighbours of test images in all\n support training images.\n\n Args:\n detection_query_features: [dict of np.arrays] List of np.arrays\n corresponding to the test features generated by\n some backbone network.\n \"\"\"", "metadata": {"task_id": "amazon-science--patchcore-inspection/30", "ground_truth": " query_features = self.feature_merger.merge(\n query_features,\n )\n query_distances, query_nns = self.imagelevel_nn(query_features)\n anomaly_scores = np.mean(query_distances, axis=-1)\n return anomaly_scores, query_distances, query_nns\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 344, "function_name": "predict"}, "groundtruth": " query_features = self.feature_merger.merge(\n query_features,\n )\n query_distances, query_nns = self.imagelevel_nn(query_features)\n anomaly_scores = np.mean(query_distances, axis=-1)\n return anomaly_scores, query_distances, query_nns\n"} +{"prompt": "import copy\nimport os\nimport pickle\nfrom typing import List\nfrom typing import Union\n\nimport faiss\nimport numpy as np\nimport scipy.ndimage as ndimage\nimport torch\nimport torch.nn.functional as F\n\n\nclass FaissNN(object):\n def __init__(self, on_gpu: bool = 
False, num_workers: int = 4) -> None:\n \"\"\"FAISS Nearest neighbourhood search.\n\n Args:\n on_gpu: If set true, nearest neighbour searches are done on GPU.\n num_workers: Number of workers to use with FAISS for similarity search.\n \"\"\"\n faiss.omp_set_num_threads(num_workers)\n self.on_gpu = on_gpu\n self.search_index = None\n\n def _gpu_cloner_options(self):\n return faiss.GpuClonerOptions()\n\n def _index_to_gpu(self, index):\n if self.on_gpu:\n # For the non-gpu faiss python package, there is no GpuClonerOptions\n # so we can not make a default in the function header.\n return faiss.index_cpu_to_gpu(\n faiss.StandardGpuResources(), 0, index, self._gpu_cloner_options()\n )\n return index\n\n def _index_to_cpu(self, index):\n if self.on_gpu:\n return faiss.index_gpu_to_cpu(index)\n return index\n\n def _create_index(self, dimension):\n if self.on_gpu:\n return faiss.GpuIndexFlatL2(\n faiss.StandardGpuResources(), dimension, faiss.GpuIndexFlatConfig()\n )\n return faiss.IndexFlatL2(dimension)\n\n def fit(self, features: np.ndarray) -> None:\n \"\"\"\n Adds features to the FAISS search index.\n\n Args:\n features: Array of size NxD.\n \"\"\"\n if self.search_index:\n self.reset_index()\n self.search_index = self._create_index(features.shape[-1])\n self._train(self.search_index, features)\n self.search_index.add(features)\n\n def _train(self, _index, _features):\n pass\n\n def run(\n self,\n n_nearest_neighbours,\n query_features: np.ndarray,\n index_features: np.ndarray = None,\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"\n Returns distances and indices of nearest neighbour search.\n\n Args:\n query_features: Features to retrieve.\n index_features: [optional] Index features to search in.\n \"\"\"\n if index_features is None:\n return self.search_index.search(query_features, n_nearest_neighbours)\n\n # Build a search index just for this search.\n search_index = self._create_index(index_features.shape[-1])\n self._train(search_index, index_features)\n search_index.add(index_features)\n return search_index.search(query_features, n_nearest_neighbours)\n\n def save(self, filename: str) -> None:\n faiss.write_index(self._index_to_cpu(self.search_index), filename)\n\n def load(self, filename: str) -> None:\n self.search_index = self._index_to_gpu(faiss.read_index(filename))\n\n def reset_index(self):\n if self.search_index:\n self.search_index.reset()\n self.search_index = None\n\n\nclass ApproximateFaissNN(FaissNN):\n def _train(self, index, features):\n index.train(features)\n\n def _gpu_cloner_options(self):\n cloner = faiss.GpuClonerOptions()\n cloner.useFloat16 = True\n return cloner\n\n def _create_index(self, dimension):\n index = faiss.IndexIVFPQ(\n faiss.IndexFlatL2(dimension),\n dimension,\n 512, # n_centroids\n 64, # sub-quantizers\n 8,\n ) # nbits per code\n return self._index_to_gpu(index)\n\n\nclass _BaseMerger:\n def __init__(self):\n \"\"\"Merges feature embedding by name.\"\"\"\n\n def merge(self, features: list):\n features = [self._reduce(feature) for feature in features]\n return np.concatenate(features, axis=1)\n\n\nclass AverageMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxC\n return features.reshape([features.shape[0], features.shape[1], -1]).mean(\n axis=-1\n )\n\n\nclass ConcatMerger(_BaseMerger):\n @staticmethod\n def _reduce(features):\n # NxCxWxH -> NxCWH\n return features.reshape(len(features), -1)\n\n\nclass Preprocessing(torch.nn.Module):\n def __init__(self, input_dims, output_dim):\n super(Preprocessing, 
self).__init__()\n self.input_dims = input_dims\n self.output_dim = output_dim\n\n self.preprocessing_modules = torch.nn.ModuleList()\n for input_dim in input_dims:\n module = MeanMapper(output_dim)\n self.preprocessing_modules.append(module)\n\n def forward(self, features):\n _features = []\n for module, feature in zip(self.preprocessing_modules, features):\n _features.append(module(feature))\n return torch.stack(_features, dim=1)\n\n\nclass MeanMapper(torch.nn.Module):\n def __init__(self, preprocessing_dim):\n super(MeanMapper, self).__init__()\n self.preprocessing_dim = preprocessing_dim\n\n def forward(self, features):\n features = features.reshape(len(features), 1, -1)\n return F.adaptive_avg_pool1d(features, self.preprocessing_dim).squeeze(1)\n\n\nclass Aggregator(torch.nn.Module):\n def __init__(self, target_dim):\n super(Aggregator, self).__init__()\n self.target_dim = target_dim\n\n def forward(self, features):\n \"\"\"Returns reshaped and average pooled features.\"\"\"\n # batchsize x number_of_layers x input_dim -> batchsize x target_dim\n features = features.reshape(len(features), 1, -1)\n features = F.adaptive_avg_pool1d(features, self.target_dim)\n return features.reshape(len(features), -1)\n\n\nclass RescaleSegmentor:\n def __init__(self, device, target_size=224):\n self.device = device\n self.target_size = target_size\n self.smoothing = 4\n\n def convert_to_segmentation(self, patch_scores):\n\n with torch.no_grad():\n if isinstance(patch_scores, np.ndarray):\n patch_scores = torch.from_numpy(patch_scores)\n _scores = patch_scores.to(self.device)\n _scores = _scores.unsqueeze(1)\n _scores = F.interpolate(\n _scores, size=self.target_size, mode=\"bilinear\", align_corners=False\n )\n _scores = _scores.squeeze(1)\n patch_scores = _scores.cpu().numpy()\n\n return [\n ndimage.gaussian_filter(patch_score, sigma=self.smoothing)\n for patch_score in patch_scores\n ]\n\n\nclass NetworkFeatureAggregator(torch.nn.Module):\n \"\"\"Efficient extraction of network features.\"\"\"\n\n def __init__(self, backbone, layers_to_extract_from, device):\n super(NetworkFeatureAggregator, self).__init__()\n \"\"\"Extraction of network features.\n\n Runs a network only to the last layer of the list of layers where\n network features should be extracted from.\n\n Args:\n backbone: torchvision.model\n layers_to_extract_from: [list of str]\n \"\"\"\n self.layers_to_extract_from = layers_to_extract_from\n self.backbone = backbone\n self.device = device\n if not hasattr(backbone, \"hook_handles\"):\n self.backbone.hook_handles = []\n for handle in self.backbone.hook_handles:\n handle.remove()\n self.outputs = {}\n\n for extract_layer in layers_to_extract_from:\n forward_hook = ForwardHook(\n self.outputs, extract_layer, layers_to_extract_from[-1]\n )\n if \".\" in extract_layer:\n extract_block, extract_idx = extract_layer.split(\".\")\n network_layer = backbone.__dict__[\"_modules\"][extract_block]\n if extract_idx.isnumeric():\n extract_idx = int(extract_idx)\n network_layer = network_layer[extract_idx]\n else:\n network_layer = network_layer.__dict__[\"_modules\"][extract_idx]\n else:\n network_layer = backbone.__dict__[\"_modules\"][extract_layer]\n\n if isinstance(network_layer, torch.nn.Sequential):\n self.backbone.hook_handles.append(\n network_layer[-1].register_forward_hook(forward_hook)\n )\n else:\n self.backbone.hook_handles.append(\n network_layer.register_forward_hook(forward_hook)\n )\n self.to(self.device)\n\n def forward(self, images):\n self.outputs.clear()\n with torch.no_grad():\n # 
The backbone will throw an Exception once it reached the last\n # layer to compute features from. Computation will stop there.\n try:\n _ = self.backbone(images)\n except LastLayerToExtractReachedException:\n pass\n return self.outputs\n\n def feature_dimensions(self, input_shape):\n \"\"\"Computes the feature dimensions for all layers given input_shape.\"\"\"\n _input = torch.ones([1] + list(input_shape)).to(self.device)\n _output = self(_input)\n return [_output[layer].shape[1] for layer in self.layers_to_extract_from]\n\n\nclass ForwardHook:\n def __init__(self, hook_dict, layer_name: str, last_layer_to_extract: str):\n self.hook_dict = hook_dict\n self.layer_name = layer_name\n self.raise_exception_to_break = copy.deepcopy(\n layer_name == last_layer_to_extract\n )\n\n def __call__(self, module, input, output):\n self.hook_dict[self.layer_name] = output\n if self.raise_exception_to_break:\n raise LastLayerToExtractReachedException()\n return None\n\n\nclass LastLayerToExtractReachedException(Exception):\n pass\n\n\nclass NearestNeighbourScorer(object):\n def __init__(self, n_nearest_neighbours: int, nn_method=FaissNN(False, 4)) -> None:\n \"\"\"\n Neearest-Neighbourhood Anomaly Scorer class.\n\n Args:\n n_nearest_neighbours: [int] Number of nearest neighbours used to\n determine anomalous pixels.\n nn_method: Nearest neighbour search method.\n \"\"\"\n self.feature_merger = ConcatMerger()\n\n self.n_nearest_neighbours = n_nearest_neighbours\n self.nn_method = nn_method\n\n self.imagelevel_nn = lambda query: self.nn_method.run(\n n_nearest_neighbours, query\n )\n self.pixelwise_nn = lambda query, index: self.nn_method.run(1, query, index)\n\n def fit(self, detection_features: List[np.ndarray]) -> None:\n \"\"\"Calls the fit function of the nearest neighbour method.\n\n Args:\n detection_features: [list of np.arrays]\n [[bs x d_i] for i in n] Contains a list of\n np.arrays for all training images corresponding to respective\n features VECTORS (or maps, but will be resized) produced by\n some backbone network which should be used for image-level\n anomaly detection.\n \"\"\"\n self.detection_features = self.feature_merger.merge(\n detection_features,\n )\n self.nn_method.fit(self.detection_features)\n\n def predict(\n self, query_features: List[np.ndarray]\n ) -> Union[np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"Predicts anomaly score.\n\n Searches for nearest neighbours of test images in all\n support training images.\n\n Args:\n detection_query_features: [dict of np.arrays] List of np.arrays\n corresponding to the test features generated by\n some backbone network.\n \"\"\"\n query_features = self.feature_merger.merge(\n query_features,\n )\n query_distances, query_nns = self.imagelevel_nn(query_features)\n anomaly_scores = np.mean(query_distances, axis=-1)\n return anomaly_scores, query_distances, query_nns\n\n @staticmethod\n def _detection_file(folder, prepend=\"\"):\n return os.path.join(folder, prepend + \"nnscorer_features.pkl\")\n\n @staticmethod\n def _index_file(folder, prepend=\"\"):\n return os.path.join(folder, prepend + \"nnscorer_search_index.faiss\")\n\n @staticmethod\n def _save(filename, features):\n if features is None:\n return\n with open(filename, \"wb\") as save_file:\n pickle.dump(features, save_file, pickle.HIGHEST_PROTOCOL)\n\n @staticmethod\n def _load(filename: str):\n with open(filename, \"rb\") as load_file:\n return pickle.load(load_file)\n\n def save(\n self,\n save_folder: str,\n save_features_separately: bool = False,\n prepend: str = \"\",\n ) -> 
None:", "metadata": {"task_id": "amazon-science--patchcore-inspection/31", "ground_truth": " self.nn_method.save(self._index_file(save_folder, prepend))\n if save_features_separately:\n self._save(\n self._detection_file(save_folder, prepend), self.detection_features\n )\n", "fpath_tuple": ["amazon-science_patchcore-inspection", "src", "patchcore", "common.py"], "context_start_lineno": 0, "lineno": 377, "function_name": "save"}, "groundtruth": " self.nn_method.save(self._index_file(save_folder, prepend))\n if save_features_separately:\n self._save(\n self._detection_file(save_folder, prepend), self.detection_features\n )\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport fnmatch\nimport logging\nfrom typing import Any, Callable, Dict, List, Optional, Set, Type, Union\n\nimport hydra\nimport torch\nimport torch.nn as nn\nfrom iopath.common.file_io import g_pathmgr\nfrom omegaconf import OmegaConf\n\nfrom .model_wrappers import MIMOHeadWrapper\n\n\ndef _unix_pattern_to_parameter_names(\n constraints: List[str], all_parameter_names: Set[str]\n) -> Union[None, Set[str]]:", "metadata": {"task_id": "facebookresearch--omnivore/0", "ground_truth": " parameter_names = []\n for param_name in constraints:\n matching_parameters = set(fnmatch.filter(all_parameter_names, param_name))\n assert (\n len(matching_parameters) > 0\n ), f\"param_names {param_name} don't match any param in the given names.\"\n parameter_names.append(matching_parameters)\n return set.union(*parameter_names)\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "model", "checkpoint_utils.py"], "context_start_lineno": 0, "lineno": 23, "function_name": "_unix_pattern_to_parameter_names"}, "groundtruth": " parameter_names = []\n for param_name in constraints:\n matching_parameters = set(fnmatch.filter(all_parameter_names, param_name))\n assert (\n len(matching_parameters) > 0\n ), f\"param_names {param_name} don't match any param in the given names.\"\n parameter_names.append(matching_parameters)\n return set.union(*parameter_names)\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. 
and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport fnmatch\nimport logging\nfrom typing import Any, Callable, Dict, List, Optional, Set, Type, Union\n\nimport hydra\nimport torch\nimport torch.nn as nn\nfrom iopath.common.file_io import g_pathmgr\nfrom omegaconf import OmegaConf\n\nfrom .model_wrappers import MIMOHeadWrapper\n\n\ndef _unix_pattern_to_parameter_names(\n constraints: List[str], all_parameter_names: Set[str]\n) -> Union[None, Set[str]]:\n\n parameter_names = []\n for param_name in constraints:\n matching_parameters = set(fnmatch.filter(all_parameter_names, param_name))\n assert (\n len(matching_parameters) > 0\n ), f\"param_names {param_name} don't match any param in the given names.\"\n parameter_names.append(matching_parameters)\n return set.union(*parameter_names)\n\n\nclass CkptIncludeKernel:\n \"\"\"\n Includes only the keys from the given model state_dict that match the key_pattern.\n Rest of the keys are removed from the given state_dict.\n\n Args:\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(self, key_pattern: List[str]):\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"", "metadata": {"task_id": "facebookresearch--omnivore/1", "ground_truth": " include_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n\n new_state_dict = {}\n for key in include_keys:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "model", "checkpoint_utils.py"], "context_start_lineno": 0, "lineno": 52, "function_name": "__call__"}, "groundtruth": " include_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n\n new_state_dict = {}\n for key in include_keys:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. 
and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport fnmatch\nimport logging\nfrom typing import Any, Callable, Dict, List, Optional, Set, Type, Union\n\nimport hydra\nimport torch\nimport torch.nn as nn\nfrom iopath.common.file_io import g_pathmgr\nfrom omegaconf import OmegaConf\n\nfrom .model_wrappers import MIMOHeadWrapper\n\n\ndef _unix_pattern_to_parameter_names(\n constraints: List[str], all_parameter_names: Set[str]\n) -> Union[None, Set[str]]:\n\n parameter_names = []\n for param_name in constraints:\n matching_parameters = set(fnmatch.filter(all_parameter_names, param_name))\n assert (\n len(matching_parameters) > 0\n ), f\"param_names {param_name} don't match any param in the given names.\"\n parameter_names.append(matching_parameters)\n return set.union(*parameter_names)\n\n\nclass CkptIncludeKernel:\n \"\"\"\n Includes only the keys from the given model state_dict that match the key_pattern.\n Rest of the keys are removed from the given state_dict.\n\n Args:\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(self, key_pattern: List[str]):\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n include_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n\n new_state_dict = {}\n for key in include_keys:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n\n\nclass CkptExcludeKernel:\n \"\"\"\n Removes the keys from the given model state_dict that match the key_pattern.\n\n Args:\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(self, key_pattern: List[str]):\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"", "metadata": {"task_id": "facebookresearch--omnivore/2", "ground_truth": " exclude_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n include_keys = set(state_dict.keys()) - exclude_keys\n\n new_state_dict = {}\n for key in include_keys:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "model", "checkpoint_utils.py"], "context_start_lineno": 0, "lineno": 81, "function_name": "__call__"}, "groundtruth": " exclude_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n include_keys = set(state_dict.keys()) - exclude_keys\n\n new_state_dict = {}\n for key in include_keys:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. 
and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport fnmatch\nimport logging\nfrom typing import Any, Callable, Dict, List, Optional, Set, Type, Union\n\nimport hydra\nimport torch\nimport torch.nn as nn\nfrom iopath.common.file_io import g_pathmgr\nfrom omegaconf import OmegaConf\n\nfrom .model_wrappers import MIMOHeadWrapper\n\n\ndef _unix_pattern_to_parameter_names(\n constraints: List[str], all_parameter_names: Set[str]\n) -> Union[None, Set[str]]:\n\n parameter_names = []\n for param_name in constraints:\n matching_parameters = set(fnmatch.filter(all_parameter_names, param_name))\n assert (\n len(matching_parameters) > 0\n ), f\"param_names {param_name} don't match any param in the given names.\"\n parameter_names.append(matching_parameters)\n return set.union(*parameter_names)\n\n\nclass CkptIncludeKernel:\n \"\"\"\n Includes only the keys from the given model state_dict that match the key_pattern.\n Rest of the keys are removed from the given state_dict.\n\n Args:\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(self, key_pattern: List[str]):\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n include_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n\n new_state_dict = {}\n for key in include_keys:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n\n\nclass CkptExcludeKernel:\n \"\"\"\n Removes the keys from the given model state_dict that match the key_pattern.\n\n Args:\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(self, key_pattern: List[str]):\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n exclude_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n include_keys = set(state_dict.keys()) - exclude_keys\n\n new_state_dict = {}\n for key in include_keys:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n\n\nclass CkptPrependKernel:\n \"\"\"\n Prepends the given pattern to all the keys in the checkpoint state dict after\n selecting them with key_pattern.\n\n For instance, if prepend_pattern = \"some_prepend.\" and\n key_pattern = [\"model.head\"], this kernel would prepend \"some_prepend.\" to\n \"model.key\", thus renaming the key \"model.head\" to \"some_prepend.model.head\".\n\n Args:\n prepend_pattern: The pattern to prepend the keys in the state_dict with.\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(self, prepend_pattern: str, key_pattern: List[str]):\n self.prepend_pattern = prepend_pattern\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n all_keys = set(state_dict.keys())\n\n include_keys = set(state_dict.keys())\n if self.key_pattern is not None:\n include_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n\n excluded_keys = all_keys - include_keys\n\n # Add excluded keys from re-mapping\n 
new_state_dict = {}\n for k in excluded_keys:\n new_state_dict[k] = state_dict[k]\n\n # Add keys from remapping\n for key in include_keys:\n new_state_dict[self.prepend_pattern + key] = state_dict[key]\n\n return new_state_dict\n\n\nclass CkptRenameWithCopyKernel:\n \"\"\"\n Renames and also optionally creates copyies of the key-value pairs in the checkpoint\n state dict. Before doing so, selects the keys to which to apply this kernel by\n using key_pattern.\n\n For instance, if source_pattern = \"model.head\" and\n target_patterns = [\"model.head_1\", \"model.head_2\"], this kernel would\n rename the key \"model.head\" to \"model.head_1\" and will also create a copy of the\n \"model.head\" and assign it a new name \"model.head_2\".\n\n Args:\n source_pattern: The pattern that needs to be renamed in the current\n checkpoint state_dict.\n target_patterns: A list of patterns to which the source_pattern is to be\n renamed to it. If the list has more than one element, it creates multiple\n copies of the source_pattern value and assigns then the names given in\n target_pattern.\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(\n self,\n source_pattern: str,\n target_patterns: List[str],\n key_pattern: Optional[List[str]] = None,\n ):", "metadata": {"task_id": "facebookresearch--omnivore/3", "ground_truth": " self.source_pattern = source_pattern\n self.target_patterns = target_patterns\n self.key_pattern = key_pattern\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "model", "checkpoint_utils.py"], "context_start_lineno": 0, "lineno": 168, "function_name": "__init__"}, "groundtruth": " self.source_pattern = source_pattern\n self.target_patterns = target_patterns\n self.key_pattern = key_pattern\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. 
and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport fnmatch\nimport logging\nfrom typing import Any, Callable, Dict, List, Optional, Set, Type, Union\n\nimport hydra\nimport torch\nimport torch.nn as nn\nfrom iopath.common.file_io import g_pathmgr\nfrom omegaconf import OmegaConf\n\nfrom .model_wrappers import MIMOHeadWrapper\n\n\ndef _unix_pattern_to_parameter_names(\n constraints: List[str], all_parameter_names: Set[str]\n) -> Union[None, Set[str]]:\n\n parameter_names = []\n for param_name in constraints:\n matching_parameters = set(fnmatch.filter(all_parameter_names, param_name))\n assert (\n len(matching_parameters) > 0\n ), f\"param_names {param_name} don't match any param in the given names.\"\n parameter_names.append(matching_parameters)\n return set.union(*parameter_names)\n\n\nclass CkptIncludeKernel:\n \"\"\"\n Includes only the keys from the given model state_dict that match the key_pattern.\n Rest of the keys are removed from the given state_dict.\n\n Args:\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(self, key_pattern: List[str]):\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n include_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n\n new_state_dict = {}\n for key in include_keys:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n\n\nclass CkptExcludeKernel:\n \"\"\"\n Removes the keys from the given model state_dict that match the key_pattern.\n\n Args:\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(self, key_pattern: List[str]):\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n exclude_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n include_keys = set(state_dict.keys()) - exclude_keys\n\n new_state_dict = {}\n for key in include_keys:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n\n\nclass CkptPrependKernel:\n \"\"\"\n Prepends the given pattern to all the keys in the checkpoint state dict after\n selecting them with key_pattern.\n\n For instance, if prepend_pattern = \"some_prepend.\" and\n key_pattern = [\"model.head\"], this kernel would prepend \"some_prepend.\" to\n \"model.key\", thus renaming the key \"model.head\" to \"some_prepend.model.head\".\n\n Args:\n prepend_pattern: The pattern to prepend the keys in the state_dict with.\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(self, prepend_pattern: str, key_pattern: List[str]):\n self.prepend_pattern = prepend_pattern\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n all_keys = set(state_dict.keys())\n\n include_keys = set(state_dict.keys())\n if self.key_pattern is not None:\n include_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n\n excluded_keys = all_keys - include_keys\n\n # Add excluded keys from re-mapping\n 
new_state_dict = {}\n for k in excluded_keys:\n new_state_dict[k] = state_dict[k]\n\n # Add keys from remapping\n for key in include_keys:\n new_state_dict[self.prepend_pattern + key] = state_dict[key]\n\n return new_state_dict\n\n\nclass CkptRenameWithCopyKernel:\n \"\"\"\n Renames and also optionally creates copyies of the key-value pairs in the checkpoint\n state dict. Before doing so, selects the keys to which to apply this kernel by\n using key_pattern.\n\n For instance, if source_pattern = \"model.head\" and\n target_patterns = [\"model.head_1\", \"model.head_2\"], this kernel would\n rename the key \"model.head\" to \"model.head_1\" and will also create a copy of the\n \"model.head\" and assign it a new name \"model.head_2\".\n\n Args:\n source_pattern: The pattern that needs to be renamed in the current\n checkpoint state_dict.\n target_patterns: A list of patterns to which the source_pattern is to be\n renamed to it. If the list has more than one element, it creates multiple\n copies of the source_pattern value and assigns then the names given in\n target_pattern.\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(\n self,\n source_pattern: str,\n target_patterns: List[str],\n key_pattern: Optional[List[str]] = None,\n ):\n self.source_pattern = source_pattern\n self.target_patterns = target_patterns\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n # Replaces only first occurences", "metadata": {"task_id": "facebookresearch--omnivore/4", "ground_truth": " all_keys = set(state_dict.keys())\n\n include_keys = set(state_dict.keys())\n if self.key_pattern is not None:\n include_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n\n excluded_keys = all_keys - include_keys\n\n # Add excluded keys from re-mapping\n new_state_dict = {}\n for k in excluded_keys:\n new_state_dict[k] = state_dict[k]\n\n # Add keys from remapping\n for key in include_keys:\n if self.source_pattern in key:\n for target_pattern in self.target_patterns:\n new_key = key.replace(self.source_pattern, target_pattern, 1)\n new_state_dict[new_key] = state_dict[key]\n else:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "model", "checkpoint_utils.py"], "context_start_lineno": 0, "lineno": 179, "function_name": "__call__"}, "groundtruth": " all_keys = set(state_dict.keys())\n\n include_keys = set(state_dict.keys())\n if self.key_pattern is not None:\n include_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n\n excluded_keys = all_keys - include_keys\n\n # Add excluded keys from re-mapping\n new_state_dict = {}\n for k in excluded_keys:\n new_state_dict[k] = state_dict[k]\n\n # Add keys from remapping\n for key in include_keys:\n if self.source_pattern in key:\n for target_pattern in self.target_patterns:\n new_key = key.replace(self.source_pattern, target_pattern, 1)\n new_state_dict[new_key] = state_dict[key]\n else:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. 
and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport fnmatch\nimport logging\nfrom typing import Any, Callable, Dict, List, Optional, Set, Type, Union\n\nimport hydra\nimport torch\nimport torch.nn as nn\nfrom iopath.common.file_io import g_pathmgr\nfrom omegaconf import OmegaConf\n\nfrom .model_wrappers import MIMOHeadWrapper\n\n\ndef _unix_pattern_to_parameter_names(\n constraints: List[str], all_parameter_names: Set[str]\n) -> Union[None, Set[str]]:\n\n parameter_names = []\n for param_name in constraints:\n matching_parameters = set(fnmatch.filter(all_parameter_names, param_name))\n assert (\n len(matching_parameters) > 0\n ), f\"param_names {param_name} don't match any param in the given names.\"\n parameter_names.append(matching_parameters)\n return set.union(*parameter_names)\n\n\nclass CkptIncludeKernel:\n \"\"\"\n Includes only the keys from the given model state_dict that match the key_pattern.\n Rest of the keys are removed from the given state_dict.\n\n Args:\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(self, key_pattern: List[str]):\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n include_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n\n new_state_dict = {}\n for key in include_keys:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n\n\nclass CkptExcludeKernel:\n \"\"\"\n Removes the keys from the given model state_dict that match the key_pattern.\n\n Args:\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(self, key_pattern: List[str]):\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n exclude_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n include_keys = set(state_dict.keys()) - exclude_keys\n\n new_state_dict = {}\n for key in include_keys:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n\n\nclass CkptPrependKernel:\n \"\"\"\n Prepends the given pattern to all the keys in the checkpoint state dict after\n selecting them with key_pattern.\n\n For instance, if prepend_pattern = \"some_prepend.\" and\n key_pattern = [\"model.head\"], this kernel would prepend \"some_prepend.\" to\n \"model.key\", thus renaming the key \"model.head\" to \"some_prepend.model.head\".\n\n Args:\n prepend_pattern: The pattern to prepend the keys in the state_dict with.\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(self, prepend_pattern: str, key_pattern: List[str]):\n self.prepend_pattern = prepend_pattern\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n all_keys = set(state_dict.keys())\n\n include_keys = set(state_dict.keys())\n if self.key_pattern is not None:\n include_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n\n excluded_keys = all_keys - include_keys\n\n # Add excluded keys from re-mapping\n 
new_state_dict = {}\n for k in excluded_keys:\n new_state_dict[k] = state_dict[k]\n\n # Add keys from remapping\n for key in include_keys:\n new_state_dict[self.prepend_pattern + key] = state_dict[key]\n\n return new_state_dict\n\n\nclass CkptRenameWithCopyKernel:\n \"\"\"\n Renames and also optionally creates copyies of the key-value pairs in the checkpoint\n state dict. Before doing so, selects the keys to which to apply this kernel by\n using key_pattern.\n\n For instance, if source_pattern = \"model.head\" and\n target_patterns = [\"model.head_1\", \"model.head_2\"], this kernel would\n rename the key \"model.head\" to \"model.head_1\" and will also create a copy of the\n \"model.head\" and assign it a new name \"model.head_2\".\n\n Args:\n source_pattern: The pattern that needs to be renamed in the current\n checkpoint state_dict.\n target_patterns: A list of patterns to which the source_pattern is to be\n renamed to it. If the list has more than one element, it creates multiple\n copies of the source_pattern value and assigns then the names given in\n target_pattern.\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(\n self,\n source_pattern: str,\n target_patterns: List[str],\n key_pattern: Optional[List[str]] = None,\n ):\n self.source_pattern = source_pattern\n self.target_patterns = target_patterns\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n # Replaces only first occurences\n all_keys = set(state_dict.keys())\n\n include_keys = set(state_dict.keys())\n if self.key_pattern is not None:\n include_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n\n excluded_keys = all_keys - include_keys\n\n # Add excluded keys from re-mapping\n new_state_dict = {}\n for k in excluded_keys:\n new_state_dict[k] = state_dict[k]\n\n # Add keys from remapping\n for key in include_keys:\n if self.source_pattern in key:\n for target_pattern in self.target_patterns:\n new_key = key.replace(self.source_pattern, target_pattern, 1)\n new_state_dict[new_key] = state_dict[key]\n else:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n\n\ndef load_checkpoint(\n path_list: List[str],\n pick_recursive_keys: Optional[List[str]] = None,\n map_location: str = \"cpu\",\n) -> Any:\n \"\"\"\n Loads a checkpoint from the specified path.\n\n Args:\n path_list: A list of paths which contain the checkpoint. Each element\n is tried (in order) until a file that exists is found. 
That file is then\n used to read the checkpoint.\n pick_recursive_keys: Picks sub dicts from the loaded checkpoint if not None.\n For pick_recursive_keys = [\"a\", \"b\"], will return checkpoint_dict[\"a\"][\"b\"]\n map_location (str): a function, torch.device, string or a dict specifying how to\n remap storage locations\n\n Returns: Model with the matchin pre-trained weights loaded.\n \"\"\"\n path_exists = False\n for path in path_list:\n if g_pathmgr.exists(path):\n path_exists = True\n break\n\n if not path_exists:\n raise ValueError(f\"No path exists in {path_list}\")\n\n with g_pathmgr.open(path, \"rb\") as f:\n checkpoint = torch.load(f, map_location=map_location)\n\n logging.info(f\"Loaded checkpoint from {path}\")\n if pick_recursive_keys is not None:\n for key in pick_recursive_keys:\n checkpoint = checkpoint[key]\n return checkpoint\n\n\ndef load_checkpoint_and_apply_kernels(\n checkpoint_path: str,\n checkpoint_kernels: List[Callable] = None,\n ckpt_state_dict_key: str = \"state_dict\",\n map_location: str = None,\n) -> nn.Module:\n \"\"\"\n Performs checkpoint loading with a variety of pre-processing kernel applied in\n sequence.\n\n Args:\n checkpoint_path (str): Path to the checkpoint.\n checkpoint_kernels List(Callable): A list of checkpoint processing kernels\n to apply in the specified order. Supported kernels include `CkptIncludeKernel`,\n `CkptExcludeKernel`, etc. These kernels are applied in the\n given order.\n ckpt_state_dict_key (str): Key containing the model state dict.\n map_location (str): a function, torch.device, string or a dict specifying how to\n remap storage locations\n\n Returns: Model with the matchin pre-trained weights loaded.\n \"\"\"", "metadata": {"task_id": "facebookresearch--omnivore/5", "ground_truth": " assert g_pathmgr.exists(checkpoint_path), \"Checkpoint '{}' not found\".format(\n checkpoint_path\n )\n\n # Load the checkpoint on CPU to avoid GPU mem spike.\n with g_pathmgr.open(checkpoint_path, \"rb\") as f:\n checkpoint = torch.load(f, map_location=map_location)\n\n pre_train_dict = (\n checkpoint[ckpt_state_dict_key] if ckpt_state_dict_key else checkpoint\n )\n\n logging.info(\n \"Loaded Checkpoint State Dict pre-kernel application: %s\"\n % str(\", \".join(list(pre_train_dict.keys())))\n )\n # Apply kernels\n if checkpoint_kernels is not None:\n for f in checkpoint_kernels:\n pre_train_dict = f(state_dict=pre_train_dict)\n\n logging.info(\n \"Loaded Checkpoint State Dict Post-kernel application %s\"\n % str(\", \".join(list(pre_train_dict.keys())))\n )\n\n return pre_train_dict\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "model", "checkpoint_utils.py"], "context_start_lineno": 0, "lineno": 266, "function_name": "load_checkpoint_and_apply_kernels"}, "groundtruth": " assert g_pathmgr.exists(checkpoint_path), \"Checkpoint '{}' not found\".format(\n checkpoint_path\n )\n\n # Load the checkpoint on CPU to avoid GPU mem spike.\n with g_pathmgr.open(checkpoint_path, \"rb\") as f:\n checkpoint = torch.load(f, map_location=map_location)\n\n pre_train_dict = (\n checkpoint[ckpt_state_dict_key] if ckpt_state_dict_key else checkpoint\n )\n\n logging.info(\n \"Loaded Checkpoint State Dict pre-kernel application: %s\"\n % str(\", \".join(list(pre_train_dict.keys())))\n )\n # Apply kernels\n if checkpoint_kernels is not None:\n for f in checkpoint_kernels:\n pre_train_dict = f(state_dict=pre_train_dict)\n\n logging.info(\n \"Loaded Checkpoint State Dict Post-kernel application %s\"\n % str(\", \".join(list(pre_train_dict.keys())))\n 
)\n\n return pre_train_dict\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport fnmatch\nimport logging\nfrom typing import Any, Callable, Dict, List, Optional, Set, Type, Union\n\nimport hydra\nimport torch\nimport torch.nn as nn\nfrom iopath.common.file_io import g_pathmgr\nfrom omegaconf import OmegaConf\n\nfrom .model_wrappers import MIMOHeadWrapper\n\n\ndef _unix_pattern_to_parameter_names(\n constraints: List[str], all_parameter_names: Set[str]\n) -> Union[None, Set[str]]:\n\n parameter_names = []\n for param_name in constraints:\n matching_parameters = set(fnmatch.filter(all_parameter_names, param_name))\n assert (\n len(matching_parameters) > 0\n ), f\"param_names {param_name} don't match any param in the given names.\"\n parameter_names.append(matching_parameters)\n return set.union(*parameter_names)\n\n\nclass CkptIncludeKernel:\n \"\"\"\n Includes only the keys from the given model state_dict that match the key_pattern.\n Rest of the keys are removed from the given state_dict.\n\n Args:\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(self, key_pattern: List[str]):\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n include_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n\n new_state_dict = {}\n for key in include_keys:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n\n\nclass CkptExcludeKernel:\n \"\"\"\n Removes the keys from the given model state_dict that match the key_pattern.\n\n Args:\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(self, key_pattern: List[str]):\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n exclude_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n include_keys = set(state_dict.keys()) - exclude_keys\n\n new_state_dict = {}\n for key in include_keys:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n\n\nclass CkptPrependKernel:\n \"\"\"\n Prepends the given pattern to all the keys in the checkpoint state dict after\n selecting them with key_pattern.\n\n For instance, if prepend_pattern = \"some_prepend.\" and\n key_pattern = [\"model.head\"], this kernel would prepend \"some_prepend.\" to\n \"model.key\", thus renaming the key \"model.head\" to \"some_prepend.model.head\".\n\n Args:\n prepend_pattern: The pattern to prepend the keys in the state_dict with.\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(self, prepend_pattern: str, key_pattern: List[str]):\n self.prepend_pattern = prepend_pattern\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n all_keys = set(state_dict.keys())\n\n include_keys = set(state_dict.keys())\n if self.key_pattern is not None:\n include_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n\n 
excluded_keys = all_keys - include_keys\n\n # Add excluded keys from re-mapping\n new_state_dict = {}\n for k in excluded_keys:\n new_state_dict[k] = state_dict[k]\n\n # Add keys from remapping\n for key in include_keys:\n new_state_dict[self.prepend_pattern + key] = state_dict[key]\n\n return new_state_dict\n\n\nclass CkptRenameWithCopyKernel:\n \"\"\"\n Renames and also optionally creates copyies of the key-value pairs in the checkpoint\n state dict. Before doing so, selects the keys to which to apply this kernel by\n using key_pattern.\n\n For instance, if source_pattern = \"model.head\" and\n target_patterns = [\"model.head_1\", \"model.head_2\"], this kernel would\n rename the key \"model.head\" to \"model.head_1\" and will also create a copy of the\n \"model.head\" and assign it a new name \"model.head_2\".\n\n Args:\n source_pattern: The pattern that needs to be renamed in the current\n checkpoint state_dict.\n target_patterns: A list of patterns to which the source_pattern is to be\n renamed to it. If the list has more than one element, it creates multiple\n copies of the source_pattern value and assigns then the names given in\n target_pattern.\n key_pattern: Patterns used to select the keys in the state_dict\n that are eligible for this kernel.\n \"\"\"\n\n def __init__(\n self,\n source_pattern: str,\n target_patterns: List[str],\n key_pattern: Optional[List[str]] = None,\n ):\n self.source_pattern = source_pattern\n self.target_patterns = target_patterns\n self.key_pattern = key_pattern\n\n def __call__(self, state_dict: Dict):\n \"\"\"\n Args:\n state_dict: A dictionary representing the given checkpoint's state dict.\n \"\"\"\n\n # Replaces only first occurences\n all_keys = set(state_dict.keys())\n\n include_keys = set(state_dict.keys())\n if self.key_pattern is not None:\n include_keys = _unix_pattern_to_parameter_names(\n self.key_pattern, state_dict.keys()\n )\n\n excluded_keys = all_keys - include_keys\n\n # Add excluded keys from re-mapping\n new_state_dict = {}\n for k in excluded_keys:\n new_state_dict[k] = state_dict[k]\n\n # Add keys from remapping\n for key in include_keys:\n if self.source_pattern in key:\n for target_pattern in self.target_patterns:\n new_key = key.replace(self.source_pattern, target_pattern, 1)\n new_state_dict[new_key] = state_dict[key]\n else:\n new_state_dict[key] = state_dict[key]\n\n return new_state_dict\n\n\ndef load_checkpoint(\n path_list: List[str],\n pick_recursive_keys: Optional[List[str]] = None,\n map_location: str = \"cpu\",\n) -> Any:\n \"\"\"\n Loads a checkpoint from the specified path.\n\n Args:\n path_list: A list of paths which contain the checkpoint. Each element\n is tried (in order) until a file that exists is found. 
That file is then\n used to read the checkpoint.\n pick_recursive_keys: Picks sub dicts from the loaded checkpoint if not None.\n For pick_recursive_keys = [\"a\", \"b\"], will return checkpoint_dict[\"a\"][\"b\"]\n map_location (str): a function, torch.device, string or a dict specifying how to\n remap storage locations\n\n Returns: Model with the matchin pre-trained weights loaded.\n \"\"\"\n path_exists = False\n for path in path_list:\n if g_pathmgr.exists(path):\n path_exists = True\n break\n\n if not path_exists:\n raise ValueError(f\"No path exists in {path_list}\")\n\n with g_pathmgr.open(path, \"rb\") as f:\n checkpoint = torch.load(f, map_location=map_location)\n\n logging.info(f\"Loaded checkpoint from {path}\")\n if pick_recursive_keys is not None:\n for key in pick_recursive_keys:\n checkpoint = checkpoint[key]\n return checkpoint\n\n\ndef load_checkpoint_and_apply_kernels(\n checkpoint_path: str,\n checkpoint_kernels: List[Callable] = None,\n ckpt_state_dict_key: str = \"state_dict\",\n map_location: str = None,\n) -> nn.Module:\n \"\"\"\n Performs checkpoint loading with a variety of pre-processing kernel applied in\n sequence.\n\n Args:\n checkpoint_path (str): Path to the checkpoint.\n checkpoint_kernels List(Callable): A list of checkpoint processing kernels\n to apply in the specified order. Supported kernels include `CkptIncludeKernel`,\n `CkptExcludeKernel`, etc. These kernels are applied in the\n given order.\n ckpt_state_dict_key (str): Key containing the model state dict.\n map_location (str): a function, torch.device, string or a dict specifying how to\n remap storage locations\n\n Returns: Model with the matchin pre-trained weights loaded.\n \"\"\"\n assert g_pathmgr.exists(checkpoint_path), \"Checkpoint '{}' not found\".format(\n checkpoint_path\n )\n\n # Load the checkpoint on CPU to avoid GPU mem spike.\n with g_pathmgr.open(checkpoint_path, \"rb\") as f:\n checkpoint = torch.load(f, map_location=map_location)\n\n pre_train_dict = (\n checkpoint[ckpt_state_dict_key] if ckpt_state_dict_key else checkpoint\n )\n\n logging.info(\n \"Loaded Checkpoint State Dict pre-kernel application: %s\"\n % str(\", \".join(list(pre_train_dict.keys())))\n )\n # Apply kernels\n if checkpoint_kernels is not None:\n for f in checkpoint_kernels:\n pre_train_dict = f(state_dict=pre_train_dict)\n\n logging.info(\n \"Loaded Checkpoint State Dict Post-kernel application %s\"\n % str(\", \".join(list(pre_train_dict.keys())))\n )\n\n return pre_train_dict\n\n\ndef load_state_dict_into_model(state_dict: Dict, model: nn.Module, strict: bool = True):\n \"\"\"\n Loads a state dict into the given model.\n\n Args:\n state_dict: A dictionary containing the model's\n state dict, or a subset if strict is False\n model: Model to load the checkpoint weights into\n strict: raise if the state_dict has missing state keys\n \"\"\"", "metadata": {"task_id": "facebookresearch--omnivore/6", "ground_truth": " missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)\n err = \"State key mismatch.\"\n if unexpected_keys:\n err += f\" Unexpected keys: {unexpected_keys}.\"\n if missing_keys:\n err += f\" Missing keys: {missing_keys}.\"\n if unexpected_keys or missing_keys:\n if not unexpected_keys and not strict:\n logging.warning(err)\n else:\n raise KeyError(err)\n return model\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "model", "checkpoint_utils.py"], "context_start_lineno": 0, "lineno": 305, "function_name": "load_state_dict_into_model"}, "groundtruth": " 
missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)\n err = \"State key mismatch.\"\n if unexpected_keys:\n err += f\" Unexpected keys: {unexpected_keys}.\"\n if missing_keys:\n err += f\" Missing keys: {missing_keys}.\"\n if unexpected_keys or missing_keys:\n if not unexpected_keys and not strict:\n logging.warning(err)\n else:\n raise KeyError(err)\n return model\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Mapping, Optional, Sequence\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom omnivision.data.api import VisionSample\n\n\nclass MIMOHeadWrapper(nn.Module):\n \"\"\"Attaches multiple input multiple output heads to the trunk using forward hooks.\n\n Args:\n trunk: Any model to which you want to attach the heads to.\n heads: A list of dicts with the following keys:\n fork_module: The module which the head will be applied to. It can be an\n empty string, in which case the head is attached to the trunk's output.\n head: The head which is to be attached.\n input_key: The head will only run on inputs with this key. If set to\n `None` the head will be applied to all inputs.\n output_key: The head will produce this output key. If set to `None`, the\n output key will be the same as the input key.\n\n An example heads value can look like -\n ```\n [\n {\n \"fork_module\": \"layer_1.layer_a.layer_alpha\",\n \"head\": nn.Linear(in_feat, out_feat),\n \"input_key\": \"dataset_1\",\n \"output_key\": \"out_1\",\n },\n {\n \"fork_module\": \"\",\n \"head\": nn.Linear(in_feat, out_feat),\n \"input_key\": \"dataset_1\",\n \"output_key\": \"out_2\",\n },\n {\n \"fork_module\": \"\",\n \"head\": nn.Linear(in_feat, out_feat),\n \"input_key\": \"dataset_2\",\n \"output_key\": \"out_3\",\n },\n {\n \"fork_module\": \"\",\n \"head\": nn.Conv2d(in_feat, out_feat),\n \"input_key\": None,\n \"output_key\": None,\n },\n ]\n ```\n trunk_fields: A list of dicts with the following keys:\n input_key: The input key this rule applies to. 
If `None`, applies to all\n inputs.\n args: These specific keys will be fetched from the sample and passed as\n *args to the trunk for the specified `input_key`.\n kwargs: These specific keys will be fetched from the sample and passed as\n **kwargs to the trunk for the specified `input_key`.\n\n Example -\n ```\n [\n {\n \"input_key\": \"dataset_1\",\n \"args\": [\"vision\"]\n },\n {\n \"input_key\": \"dataset_2\",\n \"args\": [\"vision\"],\n \"kwargs\": {\"mask\": \"mask\"}\n },\n ]\n ```\n\n Note that two heads cannot produce the same output key in the same forward pass.\n\n Returns:\n A dict with keys corresponding to the output keys which match with the input key.\n \"\"\"\n\n @dataclass\n class HeadArgs:\n fork_module: str\n head: nn.Module\n input_key: Optional[str]\n output_key: Optional[str]\n\n @dataclass\n class TrunkFieldArgs:\n input_key: Optional[str]\n args: List[str] = field(default_factory=list)\n kwargs: Dict[str, str] = field(default_factory=dict)\n\n def __init__(\n self,\n trunk: nn.Module,\n heads: List[Dict],\n trunk_fields: List[Dict],\n handle_list_inputs=False,\n ) -> None:\n \"\"\"WARNING: handle_list_inputs is a hack which needs to be refactored away.\"\"\"\n super().__init__()\n\n self.trunk = trunk\n self.handle_list_inputs = handle_list_inputs\n\n # cast to HeadArgs for input validation\n heads = [self.HeadArgs(**head_dict) for head_dict in heads]\n # cast to TrunkFieldArgs for input validation\n trunk_fields = [\n self.TrunkFieldArgs(**trunk_fields_dict)\n for trunk_fields_dict in trunk_fields\n ]\n\n self.head_name_to_fork_module = {}\n self.heads = nn.ModuleList()\n self.head_input_keys = []\n self.head_output_keys = []\n self.head_fork_modules = []\n\n for head_args in heads:\n self.heads.append(head_args.head)\n self.head_input_keys.append(head_args.input_key)\n self.head_output_keys.append(head_args.output_key)\n self.head_fork_modules.append(head_args.fork_module)\n\n self.trunk_field_args = {}\n self.trunk_field_kwargs = {}\n for trunk_fields_elem in trunk_fields:\n input_key = trunk_fields_elem.input_key\n if input_key in self.trunk_field_args:\n raise KeyError(\n f\"Multiple trunk_fields specified for the same input_key: {input_key}\"\n )\n self.trunk_field_args[input_key] = trunk_fields_elem.args\n self.trunk_field_kwargs[input_key] = trunk_fields_elem.kwargs\n\n # outputs is used as a temporary storage of the head outputs\n self.outputs = {}\n\n # input_key is used to specify which key is currently being processed\n self.input_key = None\n\n # handles to the hooks which can be used for removing the hooks if needed\n self.hook_handles = []\n self._register_hooks()\n\n def _register_hooks(self):\n for i, head in enumerate(self.heads):\n fork_module_name = self.head_fork_modules[i]\n\n def hook_fn(\n module,\n module_in,\n module_out,\n # the following variables are passed as kwargs in the closure to avoid\n # late binding in python\n head_method=head,\n in_key=self.head_input_keys[i],\n out_key=self.head_output_keys[i],\n ):", "metadata": {"task_id": "facebookresearch--omnivore/7", "ground_truth": " if in_key is not None and self.input_key != in_key:\n return\n if out_key is None:\n out_key = self.input_key\n if out_key in self.outputs:\n # reset state before raising\n self.outputs = {}\n self.input_key = None\n raise ValueError(\n f\"Two heads produced the same output key `{out_key}` during forward\"\n )\n self.outputs[out_key] = head_method(module_out)\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "model", "model_wrappers.py"], 
"context_start_lineno": 0, "lineno": 170, "function_name": "hook_fn"}, "groundtruth": " if in_key is not None and self.input_key != in_key:\n return\n if out_key is None:\n out_key = self.input_key\n if out_key in self.outputs:\n # reset state before raising\n self.outputs = {}\n self.input_key = None\n raise ValueError(\n f\"Two heads produced the same output key `{out_key}` during forward\"\n )\n self.outputs[out_key] = head_method(module_out)\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Mapping, Optional, Sequence\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom omnivision.data.api import VisionSample\n\n\nclass MIMOHeadWrapper(nn.Module):\n \"\"\"Attaches multiple input multiple output heads to the trunk using forward hooks.\n\n Args:\n trunk: Any model to which you want to attach the heads to.\n heads: A list of dicts with the following keys:\n fork_module: The module which the head will be applied to. It can be an\n empty string, in which case the head is attached to the trunk's output.\n head: The head which is to be attached.\n input_key: The head will only run on inputs with this key. If set to\n `None` the head will be applied to all inputs.\n output_key: The head will produce this output key. If set to `None`, the\n output key will be the same as the input key.\n\n An example heads value can look like -\n ```\n [\n {\n \"fork_module\": \"layer_1.layer_a.layer_alpha\",\n \"head\": nn.Linear(in_feat, out_feat),\n \"input_key\": \"dataset_1\",\n \"output_key\": \"out_1\",\n },\n {\n \"fork_module\": \"\",\n \"head\": nn.Linear(in_feat, out_feat),\n \"input_key\": \"dataset_1\",\n \"output_key\": \"out_2\",\n },\n {\n \"fork_module\": \"\",\n \"head\": nn.Linear(in_feat, out_feat),\n \"input_key\": \"dataset_2\",\n \"output_key\": \"out_3\",\n },\n {\n \"fork_module\": \"\",\n \"head\": nn.Conv2d(in_feat, out_feat),\n \"input_key\": None,\n \"output_key\": None,\n },\n ]\n ```\n trunk_fields: A list of dicts with the following keys:\n input_key: The input key this rule applies to. 
If `None`, applies to all\n inputs.\n args: These specific keys will be fetched from the sample and passed as\n *args to the trunk for the specified `input_key`.\n kwargs: These specific keys will be fetched from the sample and passed as\n **kwargs to the trunk for the specified `input_key`.\n\n Example -\n ```\n [\n {\n \"input_key\": \"dataset_1\",\n \"args\": [\"vision\"]\n },\n {\n \"input_key\": \"dataset_2\",\n \"args\": [\"vision\"],\n \"kwargs\": {\"mask\": \"mask\"}\n },\n ]\n ```\n\n Note that two heads cannot produce the same output key in the same forward pass.\n\n Returns:\n A dict with keys corresponding to the output keys which match with the input key.\n \"\"\"\n\n @dataclass\n class HeadArgs:\n fork_module: str\n head: nn.Module\n input_key: Optional[str]\n output_key: Optional[str]\n\n @dataclass\n class TrunkFieldArgs:\n input_key: Optional[str]\n args: List[str] = field(default_factory=list)\n kwargs: Dict[str, str] = field(default_factory=dict)\n\n def __init__(\n self,\n trunk: nn.Module,\n heads: List[Dict],\n trunk_fields: List[Dict],\n handle_list_inputs=False,\n ) -> None:\n \"\"\"WARNING: handle_list_inputs is a hack which needs to be refactored away.\"\"\"\n super().__init__()\n\n self.trunk = trunk\n self.handle_list_inputs = handle_list_inputs\n\n # cast to HeadArgs for input validation\n heads = [self.HeadArgs(**head_dict) for head_dict in heads]\n # cast to TrunkFieldArgs for input validation\n trunk_fields = [\n self.TrunkFieldArgs(**trunk_fields_dict)\n for trunk_fields_dict in trunk_fields\n ]\n\n self.head_name_to_fork_module = {}\n self.heads = nn.ModuleList()\n self.head_input_keys = []\n self.head_output_keys = []\n self.head_fork_modules = []\n\n for head_args in heads:\n self.heads.append(head_args.head)\n self.head_input_keys.append(head_args.input_key)\n self.head_output_keys.append(head_args.output_key)\n self.head_fork_modules.append(head_args.fork_module)\n\n self.trunk_field_args = {}\n self.trunk_field_kwargs = {}\n for trunk_fields_elem in trunk_fields:\n input_key = trunk_fields_elem.input_key\n if input_key in self.trunk_field_args:\n raise KeyError(\n f\"Multiple trunk_fields specified for the same input_key: {input_key}\"\n )\n self.trunk_field_args[input_key] = trunk_fields_elem.args\n self.trunk_field_kwargs[input_key] = trunk_fields_elem.kwargs\n\n # outputs is used as a temporary storage of the head outputs\n self.outputs = {}\n\n # input_key is used to specify which key is currently being processed\n self.input_key = None\n\n # handles to the hooks which can be used for removing the hooks if needed\n self.hook_handles = []\n self._register_hooks()\n\n def _register_hooks(self):\n for i, head in enumerate(self.heads):\n fork_module_name = self.head_fork_modules[i]\n\n def hook_fn(\n module,\n module_in,\n module_out,\n # the following variables are passed as kwargs in the closure to avoid\n # late binding in python\n head_method=head,\n in_key=self.head_input_keys[i],\n out_key=self.head_output_keys[i],\n ):\n if in_key is not None and self.input_key != in_key:\n return\n if out_key is None:\n out_key = self.input_key\n if out_key in self.outputs:\n # reset state before raising\n self.outputs = {}\n self.input_key = None\n raise ValueError(\n f\"Two heads produced the same output key `{out_key}` during forward\"\n )\n self.outputs[out_key] = head_method(module_out)\n\n fork_module = self.trunk.get_submodule(fork_module_name)\n self.hook_handles.append(fork_module.register_forward_hook(hook_fn))\n\n def _get_trunk_fields(self):", 
"metadata": {"task_id": "facebookresearch--omnivore/8", "ground_truth": " fields_args = self.trunk_field_args.get(self.input_key)\n fields_kwargs = self.trunk_field_kwargs.get(self.input_key)\n if fields_args is None:\n assert fields_kwargs is None\n fields_args = self.trunk_field_args.get(None)\n fields_kwargs = self.trunk_field_kwargs.get(None)\n if fields_args is None:\n assert fields_kwargs is None\n raise ValueError(\n f\"No trunk fields specified for input key: {self.input_key}\"\n )\n return fields_args, fields_kwargs\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "model", "model_wrappers.py"], "context_start_lineno": 0, "lineno": 187, "function_name": "_get_trunk_fields"}, "groundtruth": " fields_args = self.trunk_field_args.get(self.input_key)\n fields_kwargs = self.trunk_field_kwargs.get(self.input_key)\n if fields_args is None:\n assert fields_kwargs is None\n fields_args = self.trunk_field_args.get(None)\n fields_kwargs = self.trunk_field_kwargs.get(None)\n if fields_args is None:\n assert fields_kwargs is None\n raise ValueError(\n f\"No trunk fields specified for input key: {self.input_key}\"\n )\n return fields_args, fields_kwargs\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Mapping, Optional, Sequence\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom omnivision.data.api import VisionSample\n\n\nclass MIMOHeadWrapper(nn.Module):\n \"\"\"Attaches multiple input multiple output heads to the trunk using forward hooks.\n\n Args:\n trunk: Any model to which you want to attach the heads to.\n heads: A list of dicts with the following keys:\n fork_module: The module which the head will be applied to. It can be an\n empty string, in which case the head is attached to the trunk's output.\n head: The head which is to be attached.\n input_key: The head will only run on inputs with this key. If set to\n `None` the head will be applied to all inputs.\n output_key: The head will produce this output key. If set to `None`, the\n output key will be the same as the input key.\n\n An example heads value can look like -\n ```\n [\n {\n \"fork_module\": \"layer_1.layer_a.layer_alpha\",\n \"head\": nn.Linear(in_feat, out_feat),\n \"input_key\": \"dataset_1\",\n \"output_key\": \"out_1\",\n },\n {\n \"fork_module\": \"\",\n \"head\": nn.Linear(in_feat, out_feat),\n \"input_key\": \"dataset_1\",\n \"output_key\": \"out_2\",\n },\n {\n \"fork_module\": \"\",\n \"head\": nn.Linear(in_feat, out_feat),\n \"input_key\": \"dataset_2\",\n \"output_key\": \"out_3\",\n },\n {\n \"fork_module\": \"\",\n \"head\": nn.Conv2d(in_feat, out_feat),\n \"input_key\": None,\n \"output_key\": None,\n },\n ]\n ```\n trunk_fields: A list of dicts with the following keys:\n input_key: The input key this rule applies to. 
If `None`, applies to all\n inputs.\n args: These specific keys will be fetched from the sample and passed as\n *args to the trunk for the specified `input_key`.\n kwargs: These specific keys will be fetched from the sample and passed as\n **kwargs to the trunk for the specified `input_key`.\n\n Example -\n ```\n [\n {\n \"input_key\": \"dataset_1\",\n \"args\": [\"vision\"]\n },\n {\n \"input_key\": \"dataset_2\",\n \"args\": [\"vision\"],\n \"kwargs\": {\"mask\": \"mask\"}\n },\n ]\n ```\n\n Note that two heads cannot produce the same output key in the same forward pass.\n\n Returns:\n A dict with keys corresponding to the output keys which match with the input key.\n \"\"\"\n\n @dataclass\n class HeadArgs:\n fork_module: str\n head: nn.Module\n input_key: Optional[str]\n output_key: Optional[str]\n\n @dataclass\n class TrunkFieldArgs:\n input_key: Optional[str]\n args: List[str] = field(default_factory=list)\n kwargs: Dict[str, str] = field(default_factory=dict)\n\n def __init__(\n self,\n trunk: nn.Module,\n heads: List[Dict],\n trunk_fields: List[Dict],\n handle_list_inputs=False,\n ) -> None:\n \"\"\"WARNING: handle_list_inputs is a hack which needs to be refactored away.\"\"\"\n super().__init__()\n\n self.trunk = trunk\n self.handle_list_inputs = handle_list_inputs\n\n # cast to HeadArgs for input validation\n heads = [self.HeadArgs(**head_dict) for head_dict in heads]\n # cast to TrunkFieldArgs for input validation\n trunk_fields = [\n self.TrunkFieldArgs(**trunk_fields_dict)\n for trunk_fields_dict in trunk_fields\n ]\n\n self.head_name_to_fork_module = {}\n self.heads = nn.ModuleList()\n self.head_input_keys = []\n self.head_output_keys = []\n self.head_fork_modules = []\n\n for head_args in heads:\n self.heads.append(head_args.head)\n self.head_input_keys.append(head_args.input_key)\n self.head_output_keys.append(head_args.output_key)\n self.head_fork_modules.append(head_args.fork_module)\n\n self.trunk_field_args = {}\n self.trunk_field_kwargs = {}\n for trunk_fields_elem in trunk_fields:\n input_key = trunk_fields_elem.input_key\n if input_key in self.trunk_field_args:\n raise KeyError(\n f\"Multiple trunk_fields specified for the same input_key: {input_key}\"\n )\n self.trunk_field_args[input_key] = trunk_fields_elem.args\n self.trunk_field_kwargs[input_key] = trunk_fields_elem.kwargs\n\n # outputs is used as a temporary storage of the head outputs\n self.outputs = {}\n\n # input_key is used to specify which key is currently being processed\n self.input_key = None\n\n # handles to the hooks which can be used for removing the hooks if needed\n self.hook_handles = []\n self._register_hooks()\n\n def _register_hooks(self):\n for i, head in enumerate(self.heads):\n fork_module_name = self.head_fork_modules[i]\n\n def hook_fn(\n module,\n module_in,\n module_out,\n # the following variables are passed as kwargs in the closure to avoid\n # late binding in python\n head_method=head,\n in_key=self.head_input_keys[i],\n out_key=self.head_output_keys[i],\n ):\n if in_key is not None and self.input_key != in_key:\n return\n if out_key is None:\n out_key = self.input_key\n if out_key in self.outputs:\n # reset state before raising\n self.outputs = {}\n self.input_key = None\n raise ValueError(\n f\"Two heads produced the same output key `{out_key}` during forward\"\n )\n self.outputs[out_key] = head_method(module_out)\n\n fork_module = self.trunk.get_submodule(fork_module_name)\n self.hook_handles.append(fork_module.register_forward_hook(hook_fn))\n\n def _get_trunk_fields(self):\n 
fields_args = self.trunk_field_args.get(self.input_key)\n fields_kwargs = self.trunk_field_kwargs.get(self.input_key)\n if fields_args is None:\n assert fields_kwargs is None\n fields_args = self.trunk_field_args.get(None)\n fields_kwargs = self.trunk_field_kwargs.get(None)\n if fields_args is None:\n assert fields_kwargs is None\n raise ValueError(\n f\"No trunk fields specified for input key: {self.input_key}\"\n )\n return fields_args, fields_kwargs\n\n def forward_sub_batch(self, sub_batch, *args, **kwargs):", "metadata": {"task_id": "facebookresearch--omnivore/9", "ground_truth": " assert isinstance(sub_batch, VisionSample), f\"Received {type(sub_batch)}\"\n fields_args, fields_kwargs = self._get_trunk_fields()\n sample_args = [getattr(sub_batch, arg) for arg in fields_args]\n sample_kwargs = {\n key: getattr(sub_batch, field) for key, field in fields_kwargs.items()\n }\n self.trunk(*sample_args, *args, **sample_kwargs, **kwargs)\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "model", "model_wrappers.py"], "context_start_lineno": 0, "lineno": 201, "function_name": "forward_sub_batch"}, "groundtruth": " assert isinstance(sub_batch, VisionSample), f\"Received {type(sub_batch)}\"\n fields_args, fields_kwargs = self._get_trunk_fields()\n sample_args = [getattr(sub_batch, arg) for arg in fields_args]\n sample_kwargs = {\n key: getattr(sub_batch, field) for key, field in fields_kwargs.items()\n }\n self.trunk(*sample_args, *args, **sample_kwargs, **kwargs)\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport copy\nfrom dataclasses import dataclass, field\nfrom typing import Dict, List, Mapping, Optional, Sequence\n\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nfrom omnivision.data.api import VisionSample\n\n\nclass MIMOHeadWrapper(nn.Module):\n \"\"\"Attaches multiple input multiple output heads to the trunk using forward hooks.\n\n Args:\n trunk: Any model to which you want to attach the heads to.\n heads: A list of dicts with the following keys:\n fork_module: The module which the head will be applied to. It can be an\n empty string, in which case the head is attached to the trunk's output.\n head: The head which is to be attached.\n input_key: The head will only run on inputs with this key. If set to\n `None` the head will be applied to all inputs.\n output_key: The head will produce this output key. If set to `None`, the\n output key will be the same as the input key.\n\n An example heads value can look like -\n ```\n [\n {\n \"fork_module\": \"layer_1.layer_a.layer_alpha\",\n \"head\": nn.Linear(in_feat, out_feat),\n \"input_key\": \"dataset_1\",\n \"output_key\": \"out_1\",\n },\n {\n \"fork_module\": \"\",\n \"head\": nn.Linear(in_feat, out_feat),\n \"input_key\": \"dataset_1\",\n \"output_key\": \"out_2\",\n },\n {\n \"fork_module\": \"\",\n \"head\": nn.Linear(in_feat, out_feat),\n \"input_key\": \"dataset_2\",\n \"output_key\": \"out_3\",\n },\n {\n \"fork_module\": \"\",\n \"head\": nn.Conv2d(in_feat, out_feat),\n \"input_key\": None,\n \"output_key\": None,\n },\n ]\n ```\n trunk_fields: A list of dicts with the following keys:\n input_key: The input key this rule applies to. 
If `None`, applies to all\n inputs.\n args: These specific keys will be fetched from the sample and passed as\n *args to the trunk for the specified `input_key`.\n kwargs: These specific keys will be fetched from the sample and passed as\n **kwargs to the trunk for the specified `input_key`.\n\n Example -\n ```\n [\n {\n \"input_key\": \"dataset_1\",\n \"args\": [\"vision\"]\n },\n {\n \"input_key\": \"dataset_2\",\n \"args\": [\"vision\"],\n \"kwargs\": {\"mask\": \"mask\"}\n },\n ]\n ```\n\n Note that two heads cannot produce the same output key in the same forward pass.\n\n Returns:\n A dict with keys corresponding to the output keys which match with the input key.\n \"\"\"\n\n @dataclass\n class HeadArgs:\n fork_module: str\n head: nn.Module\n input_key: Optional[str]\n output_key: Optional[str]\n\n @dataclass\n class TrunkFieldArgs:\n input_key: Optional[str]\n args: List[str] = field(default_factory=list)\n kwargs: Dict[str, str] = field(default_factory=dict)\n\n def __init__(\n self,\n trunk: nn.Module,\n heads: List[Dict],\n trunk_fields: List[Dict],\n handle_list_inputs=False,\n ) -> None:\n \"\"\"WARNING: handle_list_inputs is a hack which needs to be refactored away.\"\"\"\n super().__init__()\n\n self.trunk = trunk\n self.handle_list_inputs = handle_list_inputs\n\n # cast to HeadArgs for input validation\n heads = [self.HeadArgs(**head_dict) for head_dict in heads]\n # cast to TrunkFieldArgs for input validation\n trunk_fields = [\n self.TrunkFieldArgs(**trunk_fields_dict)\n for trunk_fields_dict in trunk_fields\n ]\n\n self.head_name_to_fork_module = {}\n self.heads = nn.ModuleList()\n self.head_input_keys = []\n self.head_output_keys = []\n self.head_fork_modules = []\n\n for head_args in heads:\n self.heads.append(head_args.head)\n self.head_input_keys.append(head_args.input_key)\n self.head_output_keys.append(head_args.output_key)\n self.head_fork_modules.append(head_args.fork_module)\n\n self.trunk_field_args = {}\n self.trunk_field_kwargs = {}\n for trunk_fields_elem in trunk_fields:\n input_key = trunk_fields_elem.input_key\n if input_key in self.trunk_field_args:\n raise KeyError(\n f\"Multiple trunk_fields specified for the same input_key: {input_key}\"\n )\n self.trunk_field_args[input_key] = trunk_fields_elem.args\n self.trunk_field_kwargs[input_key] = trunk_fields_elem.kwargs\n\n # outputs is used as a temporary storage of the head outputs\n self.outputs = {}\n\n # input_key is used to specify which key is currently being processed\n self.input_key = None\n\n # handles to the hooks which can be used for removing the hooks if needed\n self.hook_handles = []\n self._register_hooks()\n\n def _register_hooks(self):\n for i, head in enumerate(self.heads):\n fork_module_name = self.head_fork_modules[i]\n\n def hook_fn(\n module,\n module_in,\n module_out,\n # the following variables are passed as kwargs in the closure to avoid\n # late binding in python\n head_method=head,\n in_key=self.head_input_keys[i],\n out_key=self.head_output_keys[i],\n ):\n if in_key is not None and self.input_key != in_key:\n return\n if out_key is None:\n out_key = self.input_key\n if out_key in self.outputs:\n # reset state before raising\n self.outputs = {}\n self.input_key = None\n raise ValueError(\n f\"Two heads produced the same output key `{out_key}` during forward\"\n )\n self.outputs[out_key] = head_method(module_out)\n\n fork_module = self.trunk.get_submodule(fork_module_name)\n self.hook_handles.append(fork_module.register_forward_hook(hook_fn))\n\n def _get_trunk_fields(self):\n 
fields_args = self.trunk_field_args.get(self.input_key)\n fields_kwargs = self.trunk_field_kwargs.get(self.input_key)\n if fields_args is None:\n assert fields_kwargs is None\n fields_args = self.trunk_field_args.get(None)\n fields_kwargs = self.trunk_field_kwargs.get(None)\n if fields_args is None:\n assert fields_kwargs is None\n raise ValueError(\n f\"No trunk fields specified for input key: {self.input_key}\"\n )\n return fields_args, fields_kwargs\n\n def forward_sub_batch(self, sub_batch, *args, **kwargs):\n assert isinstance(sub_batch, VisionSample), f\"Received {type(sub_batch)}\"\n fields_args, fields_kwargs = self._get_trunk_fields()\n sample_args = [getattr(sub_batch, arg) for arg in fields_args]\n sample_kwargs = {\n key: getattr(sub_batch, field) for key, field in fields_kwargs.items()\n }\n self.trunk(*sample_args, *args, **sample_kwargs, **kwargs)\n\n def forward(self, batch, *args, **kwargs) -> Dict:", "metadata": {"task_id": "facebookresearch--omnivore/10", "ground_truth": " assert isinstance(batch, Mapping)\n assert len(self.outputs) == 0\n for key, sub_batch in batch.items():\n self.input_key = key\n if self.handle_list_inputs and isinstance(sub_batch.vision, Sequence):\n # FIXME: this only handles list inputs for the field \"vision\"\n assert len(batch) == 1\n out_vals = []\n for e in sub_batch.vision:\n e_batch = copy.copy(sub_batch)\n e_batch.vision = e\n self.forward_sub_batch(e_batch, *args, **kwargs)\n assert len(self.outputs) == 1\n out_key, out_val = self.outputs.popitem()\n out_vals.append(out_val)\n return {out_key: torch.cat(out_vals)}\n else:\n self.forward_sub_batch(sub_batch, *args, **kwargs)\n outputs = self.outputs\n self.input_key = None\n self.outputs = {}\n return outputs\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "model", "model_wrappers.py"], "context_start_lineno": 0, "lineno": 210, "function_name": "forward"}, "groundtruth": " assert isinstance(batch, Mapping)\n assert len(self.outputs) == 0\n for key, sub_batch in batch.items():\n self.input_key = key\n if self.handle_list_inputs and isinstance(sub_batch.vision, Sequence):\n # FIXME: this only handles list inputs for the field \"vision\"\n assert len(batch) == 1\n out_vals = []\n for e in sub_batch.vision:\n e_batch = copy.copy(sub_batch)\n e_batch.vision = e\n self.forward_sub_batch(e_batch, *args, **kwargs)\n assert len(self.outputs) == 1\n out_key, out_val = self.outputs.popitem()\n out_vals.append(out_val)\n return {out_key: torch.cat(out_vals)}\n else:\n self.forward_sub_batch(sub_batch, *args, **kwargs)\n outputs = self.outputs\n self.input_key = None\n self.outputs = {}\n return outputs\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. 
and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom dataclasses import dataclass, field, fields, is_dataclass, make_dataclass\nfrom typing import Any, Callable, Dict\n\nfrom torch.utils.data.dataloader import default_collate\n\n\n@dataclass\nclass Batch:\n # the following are per batch args which are passed to the trainer\n # and are set to reasonable defaults\n model_fwd_kwargs: Dict = field(default_factory=dict)\n accum_steps: int = 1\n\n\ndef create_batch_sample_cls(cls):\n \"\"\"Dynamically creates a dataclass which is a `Batch` and a `Sample`.\n\n This function also registers the class in globals() to make the class picklable.\n \"\"\"", "metadata": {"task_id": "facebookresearch--omnivore/11", "ground_truth": " cls_name = f\"{Batch.__name__}{cls.__name__}\"\n batch_sample_cls = make_dataclass(cls_name, fields=(), bases=(cls, Batch))\n batch_sample_cls.__module__ = __name__\n globals()[cls_name] = batch_sample_cls\n return cls\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "data", "api.py"], "context_start_lineno": 0, "lineno": 25, "function_name": "create_batch_sample_cls"}, "groundtruth": " cls_name = f\"{Batch.__name__}{cls.__name__}\"\n batch_sample_cls = make_dataclass(cls_name, fields=(), bases=(cls, Batch))\n batch_sample_cls.__module__ = __name__\n globals()[cls_name] = batch_sample_cls\n return cls\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport contextlib\nimport json\nimport logging\nimport math\nimport os\nimport sys\nimport time\nfrom collections import OrderedDict\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, List, Mapping, Optional, Sequence\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn as nn\nfrom hydra.utils import instantiate\nfrom iopath.common.file_io import g_pathmgr\nfrom omnivision.data.api import Sample\nfrom omnivision.data.concat_dataset import ConcatDataset\nfrom omnivision.data.torch_dataset import TorchDataset\nfrom omnivision.losses import wrap_base_loss\nfrom omnivision.optim import construct_optimizer\nfrom omnivision.utils.train import (\n AverageMeter,\n copy_data_to_device,\n get_amp_type,\n get_machine_local_and_dist_rank,\n get_resume_checkpoint,\n is_dist_avail_and_initialized,\n makedir,\n ProgressMeter,\n set_seeds,\n setup_distributed_backend,\n setup_logging,\n)\n\n\ndef chunk_batch_for_accum_steps(batch, accum_steps):\n return [get_chunk_from_data(batch, i, accum_steps) for i in range(accum_steps)]\n\n\ndef get_chunk_from_data(data, chunk_id, num_chunks):\n \"\"\"\n Recursively splits all the tensors inside the passed data object into num_chunks.\n \"\"\"\n if isinstance(data, torch.Tensor):\n assert len(data) % num_chunks == 0\n start = (len(data) // num_chunks) * chunk_id\n end = (len(data) // num_chunks) * (chunk_id + 1)\n return data[start:end]\n elif isinstance(data, Mapping):\n return {\n key: get_chunk_from_data(value, chunk_id, num_chunks)\n for key, value in data.items()\n }\n elif isinstance(data, Sequence):\n return [get_chunk_from_data(value, chunk_id, num_chunks) for value in data]\n elif isinstance(data, Sample):\n data_cls = type(data)\n data = data.__dict__\n return data_cls(**get_chunk_from_data(data, chunk_id, num_chunks))\n else:\n return data\n\n\n@dataclass\nclass 
OmnivisionOptimAMPConf:\n enabled: bool = False\n amp_dtype: str = \"float16\"\n\n\n@dataclass\nclass OmnivisionOptimConf:\n optimizer: torch.optim.Optimizer = None\n options: Optional[Dict[str, Any]] = None\n param_group_modifiers: Optional[List] = None\n amp: Optional[Dict[str, Any]] = None\n gradient_clip: Any = None\n\n def __post_init__(self):\n # amp", "metadata": {"task_id": "facebookresearch--omnivore/12", "ground_truth": " if not isinstance(self.amp, OmnivisionOptimAMPConf):\n if self.amp is None:\n self.amp = {}\n assert isinstance(self.amp, Mapping)\n self.amp = OmnivisionOptimAMPConf(**self.amp)\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "trainer", "omnivision_trainer.py"], "context_start_lineno": 0, "lineno": 86, "function_name": "__post_init__"}, "groundtruth": " if not isinstance(self.amp, OmnivisionOptimAMPConf):\n if self.amp is None:\n self.amp = {}\n assert isinstance(self.amp, Mapping)\n self.amp = OmnivisionOptimAMPConf(**self.amp)\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pyre-ignore-all-errors\n\nimport fnmatch\nimport itertools\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union\n\nimport hydra\nimport torch\nimport torch.nn as nn\nfrom omegaconf import DictConfig, MISSING\n\nfrom . import LARS, OmniOptimizer\n\n\ndef create_lars_optimizer(params, opt, **lars_params):\n optim = hydra.utils.instantiate(opt, params=params)\n return LARS(optim, **lars_params)\n\n\ndef validate_param_group_params(param_groups, model):", "metadata": {"task_id": "facebookresearch--omnivore/13", "ground_truth": " parameters = [set(param_group[\"params\"]) for param_group in param_groups]\n model_parameters = {parameter for _, parameter in model.named_parameters()}\n for p1, p2 in itertools.permutations(parameters, 2):\n assert p1.isdisjoint(p2), \"Scheduler generated param_groups should be disjoint\"\n assert (\n set.union(*parameters) == model_parameters\n ), \"Scheduler generated param_groups include all parameters of the model\"\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "optim", "optimizer.py"], "context_start_lineno": 0, "lineno": 28, "function_name": "validate_param_group_params"}, "groundtruth": " parameters = [set(param_group[\"params\"]) for param_group in param_groups]\n model_parameters = {parameter for _, parameter in model.named_parameters()}\n for p1, p2 in itertools.permutations(parameters, 2):\n assert p1.isdisjoint(p2), \"Scheduler generated param_groups should be disjoint\"\n assert (\n set.union(*parameters) == model_parameters\n ), \"Scheduler generated param_groups include all parameters of the model\"\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pyre-ignore-all-errors\n\nimport fnmatch\nimport itertools\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union\n\nimport hydra\nimport torch\nimport torch.nn as nn\nfrom omegaconf import DictConfig, MISSING\n\nfrom . 
import LARS, OmniOptimizer\n\n\ndef create_lars_optimizer(params, opt, **lars_params):\n optim = hydra.utils.instantiate(opt, params=params)\n return LARS(optim, **lars_params)\n\n\ndef validate_param_group_params(param_groups, model):\n parameters = [set(param_group[\"params\"]) for param_group in param_groups]\n model_parameters = {parameter for _, parameter in model.named_parameters()}\n for p1, p2 in itertools.permutations(parameters, 2):\n assert p1.isdisjoint(p2), \"Scheduler generated param_groups should be disjoint\"\n assert (\n set.union(*parameters) == model_parameters\n ), \"Scheduler generated param_groups include all parameters of the model\"\n\n\ndef unix_pattern_to_parameter_names(\n scheduler_cfg: DictConfig, model: nn.Module\n) -> Union[None, Set[str]]:", "metadata": {"task_id": "facebookresearch--omnivore/14", "ground_truth": " if \"param_names\" not in scheduler_cfg and \"module_cls_names\" not in scheduler_cfg:\n return None\n return unix_param_pattern_to_parameter_names(scheduler_cfg, model).union(\n unix_module_cls_pattern_to_parameter_names(scheduler_cfg, model)\n )\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "optim", "optimizer.py"], "context_start_lineno": 0, "lineno": 40, "function_name": "unix_pattern_to_parameter_names"}, "groundtruth": " if \"param_names\" not in scheduler_cfg and \"module_cls_names\" not in scheduler_cfg:\n return None\n return unix_param_pattern_to_parameter_names(scheduler_cfg, model).union(\n unix_module_cls_pattern_to_parameter_names(scheduler_cfg, model)\n )\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pyre-ignore-all-errors\n\nimport fnmatch\nimport itertools\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union\n\nimport hydra\nimport torch\nimport torch.nn as nn\nfrom omegaconf import DictConfig, MISSING\n\nfrom . 
import LARS, OmniOptimizer\n\n\ndef create_lars_optimizer(params, opt, **lars_params):\n optim = hydra.utils.instantiate(opt, params=params)\n return LARS(optim, **lars_params)\n\n\ndef validate_param_group_params(param_groups, model):\n parameters = [set(param_group[\"params\"]) for param_group in param_groups]\n model_parameters = {parameter for _, parameter in model.named_parameters()}\n for p1, p2 in itertools.permutations(parameters, 2):\n assert p1.isdisjoint(p2), \"Scheduler generated param_groups should be disjoint\"\n assert (\n set.union(*parameters) == model_parameters\n ), \"Scheduler generated param_groups include all parameters of the model\"\n\n\ndef unix_pattern_to_parameter_names(\n scheduler_cfg: DictConfig, model: nn.Module\n) -> Union[None, Set[str]]:\n if \"param_names\" not in scheduler_cfg and \"module_cls_names\" not in scheduler_cfg:\n return None\n return unix_param_pattern_to_parameter_names(scheduler_cfg, model).union(\n unix_module_cls_pattern_to_parameter_names(scheduler_cfg, model)\n )\n\n\ndef get_full_parameter_name(module_name, param_name):", "metadata": {"task_id": "facebookresearch--omnivore/15", "ground_truth": " if module_name == \"\":\n return param_name\n return f\"{module_name}.{param_name}\"\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "optim", "optimizer.py"], "context_start_lineno": 0, "lineno": 48, "function_name": "get_full_parameter_name"}, "groundtruth": " if module_name == \"\":\n return param_name\n return f\"{module_name}.{param_name}\"\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pyre-ignore-all-errors\n\nimport fnmatch\nimport itertools\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union\n\nimport hydra\nimport torch\nimport torch.nn as nn\nfrom omegaconf import DictConfig, MISSING\n\nfrom . 
import LARS, OmniOptimizer\n\n\ndef create_lars_optimizer(params, opt, **lars_params):\n optim = hydra.utils.instantiate(opt, params=params)\n return LARS(optim, **lars_params)\n\n\ndef validate_param_group_params(param_groups, model):\n parameters = [set(param_group[\"params\"]) for param_group in param_groups]\n model_parameters = {parameter for _, parameter in model.named_parameters()}\n for p1, p2 in itertools.permutations(parameters, 2):\n assert p1.isdisjoint(p2), \"Scheduler generated param_groups should be disjoint\"\n assert (\n set.union(*parameters) == model_parameters\n ), \"Scheduler generated param_groups include all parameters of the model\"\n\n\ndef unix_pattern_to_parameter_names(\n scheduler_cfg: DictConfig, model: nn.Module\n) -> Union[None, Set[str]]:\n if \"param_names\" not in scheduler_cfg and \"module_cls_names\" not in scheduler_cfg:\n return None\n return unix_param_pattern_to_parameter_names(scheduler_cfg, model).union(\n unix_module_cls_pattern_to_parameter_names(scheduler_cfg, model)\n )\n\n\ndef get_full_parameter_name(module_name, param_name):\n if module_name == \"\":\n return param_name\n return f\"{module_name}.{param_name}\"\n\n\ndef unix_module_cls_pattern_to_parameter_names(\n scheduler_cfg: DictConfig,\n model: nn.Module,\n) -> Union[None, Set[str]]:\n if \"module_cls_names\" not in scheduler_cfg:\n return set()\n module_cls_to_params = {}\n for module_name, module in model.named_modules():\n module_cls = type(module)\n module_cls_to_params.setdefault(module_cls, set())\n module_cls_to_params[module_cls] |= set(\n get_full_parameter_name(module_name, param_name)\n for param_name, _ in module.named_parameters()\n )\n parameter_names = []\n for module_cls_name in scheduler_cfg.module_cls_names:\n module_cls = hydra.utils.get_class(module_cls_name)\n matching_parameters = module_cls_to_params.get(module_cls, set())\n assert len(matching_parameters) > 0, (\n f\"Optimizer option for {scheduler_cfg.option} module_cls_name\"\n f\" {module_cls_name} does not match any classes in the model\"\n )\n logging.info(\n f\"Matches for module_cls_name [{module_cls_name}]: {matching_parameters} \"\n )\n parameter_names.append(matching_parameters)\n return set.union(*parameter_names)\n\n\ndef unix_param_pattern_to_parameter_names(\n scheduler_cfg: DictConfig,\n model: nn.Module,\n) -> Union[None, Set[str]]:", "metadata": {"task_id": "facebookresearch--omnivore/16", "ground_truth": " if \"param_names\" not in scheduler_cfg:\n return set()\n all_parameter_names = {name for name, _ in model.named_parameters()}\n parameter_names = []\n for param_name in scheduler_cfg.param_names:\n matching_parameters = set(fnmatch.filter(all_parameter_names, param_name))\n assert len(matching_parameters) >= 1, (\n f\"Optimizer option for {scheduler_cfg.option} param_names {param_name} \"\n \"does not match any parameters in the model\"\n )\n logging.info(f\"Matches for param_name [{param_name}]: {matching_parameters}\")\n parameter_names.append(matching_parameters)\n return set.union(*parameter_names)\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "optim", "optimizer.py"], "context_start_lineno": 0, "lineno": 86, "function_name": "unix_param_pattern_to_parameter_names"}, "groundtruth": " if \"param_names\" not in scheduler_cfg:\n return set()\n all_parameter_names = {name for name, _ in model.named_parameters()}\n parameter_names = []\n for param_name in scheduler_cfg.param_names:\n matching_parameters = set(fnmatch.filter(all_parameter_names, param_name))\n assert 
len(matching_parameters) >= 1, (\n f\"Optimizer option for {scheduler_cfg.option} param_names {param_name} \"\n \"does not match any parameters in the model\"\n )\n logging.info(f\"Matches for param_name [{param_name}]: {matching_parameters}\")\n parameter_names.append(matching_parameters)\n return set.union(*parameter_names)\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pyre-ignore-all-errors\n\nimport fnmatch\nimport itertools\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union\n\nimport hydra\nimport torch\nimport torch.nn as nn\nfrom omegaconf import DictConfig, MISSING\n\nfrom . import LARS, OmniOptimizer\n\n\ndef create_lars_optimizer(params, opt, **lars_params):\n optim = hydra.utils.instantiate(opt, params=params)\n return LARS(optim, **lars_params)\n\n\ndef validate_param_group_params(param_groups, model):\n parameters = [set(param_group[\"params\"]) for param_group in param_groups]\n model_parameters = {parameter for _, parameter in model.named_parameters()}\n for p1, p2 in itertools.permutations(parameters, 2):\n assert p1.isdisjoint(p2), \"Scheduler generated param_groups should be disjoint\"\n assert (\n set.union(*parameters) == model_parameters\n ), \"Scheduler generated param_groups include all parameters of the model\"\n\n\ndef unix_pattern_to_parameter_names(\n scheduler_cfg: DictConfig, model: nn.Module\n) -> Union[None, Set[str]]:\n if \"param_names\" not in scheduler_cfg and \"module_cls_names\" not in scheduler_cfg:\n return None\n return unix_param_pattern_to_parameter_names(scheduler_cfg, model).union(\n unix_module_cls_pattern_to_parameter_names(scheduler_cfg, model)\n )\n\n\ndef get_full_parameter_name(module_name, param_name):\n if module_name == \"\":\n return param_name\n return f\"{module_name}.{param_name}\"\n\n\ndef unix_module_cls_pattern_to_parameter_names(\n scheduler_cfg: DictConfig,\n model: nn.Module,\n) -> Union[None, Set[str]]:\n if \"module_cls_names\" not in scheduler_cfg:\n return set()\n module_cls_to_params = {}\n for module_name, module in model.named_modules():\n module_cls = type(module)\n module_cls_to_params.setdefault(module_cls, set())\n module_cls_to_params[module_cls] |= set(\n get_full_parameter_name(module_name, param_name)\n for param_name, _ in module.named_parameters()\n )\n parameter_names = []\n for module_cls_name in scheduler_cfg.module_cls_names:\n module_cls = hydra.utils.get_class(module_cls_name)\n matching_parameters = module_cls_to_params.get(module_cls, set())\n assert len(matching_parameters) > 0, (\n f\"Optimizer option for {scheduler_cfg.option} module_cls_name\"\n f\" {module_cls_name} does not match any classes in the model\"\n )\n logging.info(\n f\"Matches for module_cls_name [{module_cls_name}]: {matching_parameters} \"\n )\n parameter_names.append(matching_parameters)\n return set.union(*parameter_names)\n\n\ndef unix_param_pattern_to_parameter_names(\n scheduler_cfg: DictConfig,\n model: nn.Module,\n) -> Union[None, Set[str]]:\n if \"param_names\" not in scheduler_cfg:\n return set()\n all_parameter_names = {name for name, _ in model.named_parameters()}\n parameter_names = []\n for param_name in scheduler_cfg.param_names:\n matching_parameters = set(fnmatch.filter(all_parameter_names, param_name))\n assert len(matching_parameters) >= 1, (\n f\"Optimizer option for 
{scheduler_cfg.option} param_names {param_name} \"\n \"does not match any parameters in the model\"\n )\n logging.info(f\"Matches for param_name [{param_name}]: {matching_parameters}\")\n parameter_names.append(matching_parameters)\n return set.union(*parameter_names)\n\n\ndef set_default_parameters(\n scheduler_cfgs: List[DictConfig], all_parameter_names: Set[str]\n) -> None:", "metadata": {"task_id": "facebookresearch--omnivore/17", "ground_truth": " constraints = [\n scheduler_cfg.parameter_names\n for scheduler_cfg in scheduler_cfgs\n if scheduler_cfg.parameter_names is not None\n ]\n if len(constraints) == 0:\n default_params = set(all_parameter_names)\n else:\n\n default_params = all_parameter_names - set.union(*constraints)\n default_count = 0\n for scheduler_cfg in scheduler_cfgs:\n if scheduler_cfg.parameter_names is None:\n scheduler_cfg.parameter_names = default_params\n default_count += 1\n assert default_count <= 1, \"Only one scheduler per option can be default\"\n if default_count == 0: # Add defaults without options\n scheduler_cfgs.append({\"parameter_names\": default_params})\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "optim", "optimizer.py"], "context_start_lineno": 0, "lineno": 104, "function_name": "set_default_parameters"}, "groundtruth": " constraints = [\n scheduler_cfg.parameter_names\n for scheduler_cfg in scheduler_cfgs\n if scheduler_cfg.parameter_names is not None\n ]\n if len(constraints) == 0:\n default_params = set(all_parameter_names)\n else:\n\n default_params = all_parameter_names - set.union(*constraints)\n default_count = 0\n for scheduler_cfg in scheduler_cfgs:\n if scheduler_cfg.parameter_names is None:\n scheduler_cfg.parameter_names = default_params\n default_count += 1\n assert default_count <= 1, \"Only one scheduler per option can be default\"\n if default_count == 0: # Add defaults without options\n scheduler_cfgs.append({\"parameter_names\": default_params})\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pyre-ignore-all-errors\n\nimport fnmatch\nimport itertools\nimport logging\nfrom dataclasses import dataclass\nfrom typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union\n\nimport hydra\nimport torch\nimport torch.nn as nn\nfrom omegaconf import DictConfig, MISSING\n\nfrom . 
import LARS, OmniOptimizer\n\n\ndef create_lars_optimizer(params, opt, **lars_params):\n optim = hydra.utils.instantiate(opt, params=params)\n return LARS(optim, **lars_params)\n\n\ndef validate_param_group_params(param_groups, model):\n parameters = [set(param_group[\"params\"]) for param_group in param_groups]\n model_parameters = {parameter for _, parameter in model.named_parameters()}\n for p1, p2 in itertools.permutations(parameters, 2):\n assert p1.isdisjoint(p2), \"Scheduler generated param_groups should be disjoint\"\n assert (\n set.union(*parameters) == model_parameters\n ), \"Scheduler generated param_groups include all parameters of the model\"\n\n\ndef unix_pattern_to_parameter_names(\n scheduler_cfg: DictConfig, model: nn.Module\n) -> Union[None, Set[str]]:\n if \"param_names\" not in scheduler_cfg and \"module_cls_names\" not in scheduler_cfg:\n return None\n return unix_param_pattern_to_parameter_names(scheduler_cfg, model).union(\n unix_module_cls_pattern_to_parameter_names(scheduler_cfg, model)\n )\n\n\ndef get_full_parameter_name(module_name, param_name):\n if module_name == \"\":\n return param_name\n return f\"{module_name}.{param_name}\"\n\n\ndef unix_module_cls_pattern_to_parameter_names(\n scheduler_cfg: DictConfig,\n model: nn.Module,\n) -> Union[None, Set[str]]:\n if \"module_cls_names\" not in scheduler_cfg:\n return set()\n module_cls_to_params = {}\n for module_name, module in model.named_modules():\n module_cls = type(module)\n module_cls_to_params.setdefault(module_cls, set())\n module_cls_to_params[module_cls] |= set(\n get_full_parameter_name(module_name, param_name)\n for param_name, _ in module.named_parameters()\n )\n parameter_names = []\n for module_cls_name in scheduler_cfg.module_cls_names:\n module_cls = hydra.utils.get_class(module_cls_name)\n matching_parameters = module_cls_to_params.get(module_cls, set())\n assert len(matching_parameters) > 0, (\n f\"Optimizer option for {scheduler_cfg.option} module_cls_name\"\n f\" {module_cls_name} does not match any classes in the model\"\n )\n logging.info(\n f\"Matches for module_cls_name [{module_cls_name}]: {matching_parameters} \"\n )\n parameter_names.append(matching_parameters)\n return set.union(*parameter_names)\n\n\ndef unix_param_pattern_to_parameter_names(\n scheduler_cfg: DictConfig,\n model: nn.Module,\n) -> Union[None, Set[str]]:\n if \"param_names\" not in scheduler_cfg:\n return set()\n all_parameter_names = {name for name, _ in model.named_parameters()}\n parameter_names = []\n for param_name in scheduler_cfg.param_names:\n matching_parameters = set(fnmatch.filter(all_parameter_names, param_name))\n assert len(matching_parameters) >= 1, (\n f\"Optimizer option for {scheduler_cfg.option} param_names {param_name} \"\n \"does not match any parameters in the model\"\n )\n logging.info(f\"Matches for param_name [{param_name}]: {matching_parameters}\")\n parameter_names.append(matching_parameters)\n return set.union(*parameter_names)\n\n\ndef set_default_parameters(\n scheduler_cfgs: List[DictConfig], all_parameter_names: Set[str]\n) -> None:\n constraints = [\n scheduler_cfg.parameter_names\n for scheduler_cfg in scheduler_cfgs\n if scheduler_cfg.parameter_names is not None\n ]\n if len(constraints) == 0:\n default_params = set(all_parameter_names)\n else:\n\n default_params = all_parameter_names - set.union(*constraints)\n default_count = 0\n for scheduler_cfg in scheduler_cfgs:\n if scheduler_cfg.parameter_names is None:\n scheduler_cfg.parameter_names = default_params\n default_count += 1\n 
assert default_count <= 1, \"Only one scheduler per option can be default\"\n if default_count == 0: # Add defaults without options\n scheduler_cfgs.append({\"parameter_names\": default_params})\n\n\ndef name_constraints_to_parameters(\n param_constraints: List[Set[str]], model: torch.nn.Module\n) -> List[torch.nn.Parameter]:\n matching_names = set.intersection(*param_constraints)\n return [value for name, value in model.named_parameters() if name in matching_names]\n\n\ndef map_scheduler_cfgs_to_param_groups(\n scheduler_cfgs_per_param_group: Iterable[List[Dict]], model: torch.nn.Module\n) -> Tuple[List[Dict[Any, Any]], List[Dict[str, List[torch.nn.Parameter]]]]:", "metadata": {"task_id": "facebookresearch--omnivore/18", "ground_truth": " schedulers = []\n param_groups = []\n for scheduler_cfgs in scheduler_cfgs_per_param_group:\n param_constraints = [\n scheduler_cfg[\"parameter_names\"] for scheduler_cfg in scheduler_cfgs\n ]\n matching_parameters = name_constraints_to_parameters(param_constraints, model)\n if len(matching_parameters) == 0: # If no overlap of parameters, skip\n continue\n schedulers_for_group = {\n scheduler_cfg[\"option\"]: scheduler_cfg[\"scheduler\"]\n for scheduler_cfg in scheduler_cfgs\n if \"option\" in scheduler_cfg\n }\n schedulers.append(schedulers_for_group)\n param_groups.append({\"params\": matching_parameters})\n return schedulers, param_groups\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "optim", "optimizer.py"], "context_start_lineno": 0, "lineno": 134, "function_name": "map_scheduler_cfgs_to_param_groups"}, "groundtruth": " schedulers = []\n param_groups = []\n for scheduler_cfgs in scheduler_cfgs_per_param_group:\n param_constraints = [\n scheduler_cfg[\"parameter_names\"] for scheduler_cfg in scheduler_cfgs\n ]\n matching_parameters = name_constraints_to_parameters(param_constraints, model)\n if len(matching_parameters) == 0: # If no overlap of parameters, skip\n continue\n schedulers_for_group = {\n scheduler_cfg[\"option\"]: scheduler_cfg[\"scheduler\"]\n for scheduler_cfg in scheduler_cfgs\n if \"option\" in scheduler_cfg\n }\n schedulers.append(schedulers_for_group)\n param_groups.append({\"params\": matching_parameters})\n return schedulers, param_groups\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nclass OmniOptimizer(object):\n def __init__(self, optimizer, schedulers=None) -> None:", "metadata": {"task_id": "facebookresearch--omnivore/19", "ground_truth": " self.optimizer = optimizer\n self.schedulers = schedulers\n self._validate_optimizer_schedulers()\n self.step_schedulers(0.0)\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "optim", "omni_optimizer.py"], "context_start_lineno": 0, "lineno": 9, "function_name": "__init__"}, "groundtruth": " self.optimizer = optimizer\n self.schedulers = schedulers\n self._validate_optimizer_schedulers()\n self.step_schedulers(0.0)\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. 
and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nclass OmniOptimizer(object):\n def __init__(self, optimizer, schedulers=None) -> None:\n self.optimizer = optimizer\n self.schedulers = schedulers\n self._validate_optimizer_schedulers()\n self.step_schedulers(0.0)\n\n def _validate_optimizer_schedulers(self):", "metadata": {"task_id": "facebookresearch--omnivore/20", "ground_truth": " if self.schedulers is None:\n return\n for _, set_of_schedulers in enumerate(self.schedulers):\n for option, _ in set_of_schedulers.items():\n assert option in self.optimizer.defaults, (\n \"Optimizer option \"\n f\"{option} not found in {self.optimizer}. Valid options are \"\n f\"{self.optimizer.defaults.keys()}\"\n )\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "optim", "omni_optimizer.py"], "context_start_lineno": 0, "lineno": 15, "function_name": "_validate_optimizer_schedulers"}, "groundtruth": " if self.schedulers is None:\n return\n for _, set_of_schedulers in enumerate(self.schedulers):\n for option, _ in set_of_schedulers.items():\n assert option in self.optimizer.defaults, (\n \"Optimizer option \"\n f\"{option} not found in {self.optimizer}. Valid options are \"\n f\"{self.optimizer.defaults.keys()}\"\n )\n"} +{"prompt": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nclass OmniOptimizer(object):\n def __init__(self, optimizer, schedulers=None) -> None:\n self.optimizer = optimizer\n self.schedulers = schedulers\n self._validate_optimizer_schedulers()\n self.step_schedulers(0.0)\n\n def _validate_optimizer_schedulers(self):\n if self.schedulers is None:\n return\n for _, set_of_schedulers in enumerate(self.schedulers):\n for option, _ in set_of_schedulers.items():\n assert option in self.optimizer.defaults, (\n \"Optimizer option \"\n f\"{option} not found in {self.optimizer}. 
Valid options are \"\n f\"{self.optimizer.defaults.keys()}\"\n )\n\n def step_schedulers(self, where: float) -> None:", "metadata": {"task_id": "facebookresearch--omnivore/21", "ground_truth": " if self.schedulers is None:\n return\n for i, param_group in enumerate(self.optimizer.param_groups):\n for option, scheduler in self.schedulers[i].items():\n new_value = scheduler(where)\n param_group[option] = new_value\n", "fpath_tuple": ["facebookresearch_omnivore", "omnivision", "optim", "omni_optimizer.py"], "context_start_lineno": 0, "lineno": 26, "function_name": "step_schedulers"}, "groundtruth": " if self.schedulers is None:\n return\n for i, param_group in enumerate(self.optimizer.param_groups):\n for option, scheduler in self.schedulers[i].items():\n new_value = scheduler(where)\n param_group[option] = new_value\n"} +{"prompt": "from __future__ import annotations\n\nfrom ..checks import _check_type\nfrom ..types import Column, Func, PandasDataFrame\n\n\ndef mutate(df: PandasDataFrame, over: dict[Column, Func]) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/0", "ground_truth": " _check_type(over, dict)\n df = df.copy()\n for column, mutation in over.items():\n df[column] = df.apply(mutation, axis=1)\n return df # type: ignore\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "mutate.py"], "context_start_lineno": 0, "lineno": 7, "function_name": "mutate"}, "groundtruth": " _check_type(over, dict)\n df = df.copy()\n for column, mutation in over.items():\n df[column] = df.apply(mutation, axis=1)\n return df # type: ignore\n"} +{"prompt": "from __future__ import annotations\n\nfrom ..checks import _check_type\nfrom ..types import LazyColumns, PandasDataFrame\n\n\ndef drop(df: PandasDataFrame, columns: LazyColumns) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/1", "ground_truth": " _check_type(columns, {list, str})\n df = df.drop(columns, axis=1)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "drop.py"], "context_start_lineno": 0, "lineno": 7, "function_name": "drop"}, "groundtruth": " _check_type(columns, {list, str})\n df = df.drop(columns, axis=1)\n return df\n"} +{"prompt": "from ..checks import _check_type, _check_values\nfrom ..types import NewColumn, OldColumn, PandasDataFrame\n\n\ndef rename(df: PandasDataFrame, columns: dict[OldColumn, NewColumn]) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/2", "ground_truth": " _check_type(columns, dict)\n cv = columns.values()\n _check_values(cv, str)\n if len(set(cv)) != len(cv):\n raise KeyError(\"columns must be unique\")\n missing_keys = set(columns.keys()) - set(df.columns)\n if missing_keys and len(missing_keys) == 1:\n raise KeyError(f\"column key ({missing_keys}) is invalid\")\n if missing_keys and len(missing_keys) > 1:\n raise KeyError(f\"column keys ({missing_keys}) are invalid\")\n df = df.rename(columns=columns)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "rename.py"], "context_start_lineno": 0, "lineno": 5, "function_name": "rename"}, "groundtruth": " _check_type(columns, dict)\n cv = columns.values()\n _check_values(cv, str)\n if len(set(cv)) != len(cv):\n raise KeyError(\"columns must be unique\")\n missing_keys = set(columns.keys()) - set(df.columns)\n if missing_keys and len(missing_keys) == 1:\n raise KeyError(f\"column key ({missing_keys}) is invalid\")\n if missing_keys and len(missing_keys) > 1:\n raise KeyError(f\"column keys ({missing_keys}) are invalid\")\n df = 
df.rename(columns=columns)\n return df\n"} +{"prompt": "import uuid\n\nfrom ..checks import _check_type\nfrom ..types import Column, Columns, PandasDataFrame\n\n\ndef split(\n df: PandasDataFrame, column: Column, into: Columns, sep: str, drop: bool = True\n) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/3", "ground_truth": " _check_type(column, str)\n _check_type(into, list)\n _check_type(sep, str)\n _check_type(drop, bool)\n if len(into) != len(set(into)):\n raise KeyError(\"into keys must be unique\")\n if (column in into) and (not drop):\n raise KeyError(\"into keys must be unique\")\n bad_keys = set(df.columns).difference(set([column])).intersection(set(into))\n if bad_keys:\n raise KeyError(\"into keys must be unique\")\n columns = {uuid.uuid4().hex: col for col in into}\n temp = list(columns.keys())\n df = df.copy()\n df[temp] = df[column].str.split(sep, expand=True)\n if drop:\n df = df.drop(column, axis=1)\n df = df.rename(columns=columns)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "split.py"], "context_start_lineno": 0, "lineno": 9, "function_name": "split"}, "groundtruth": " _check_type(column, str)\n _check_type(into, list)\n _check_type(sep, str)\n _check_type(drop, bool)\n if len(into) != len(set(into)):\n raise KeyError(\"into keys must be unique\")\n if (column in into) and (not drop):\n raise KeyError(\"into keys must be unique\")\n bad_keys = set(df.columns).difference(set([column])).intersection(set(into))\n if bad_keys:\n raise KeyError(\"into keys must be unique\")\n columns = {uuid.uuid4().hex: col for col in into}\n temp = list(columns.keys())\n df = df.copy()\n df[temp] = df[column].str.split(sep, expand=True)\n if drop:\n df = df.drop(column, axis=1)\n df = df.rename(columns=columns)\n return df\n"} +{"prompt": "from __future__ import annotations\n\nfrom ..checks import _check_keys, _check_type\nfrom ..types import LazyColumns, PandasDataFrame\n\n\ndef sort(\n df: PandasDataFrame, columns: LazyColumns, descending: bool = False\n) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/4", "ground_truth": " _check_type(columns, {list, str})\n _check_type(descending, bool)\n _check_keys(columns, df.columns)\n df = df.sort_values(by=columns, ascending=not descending)\n df = df.reset_index(drop=True)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "sort.py"], "context_start_lineno": 0, "lineno": 9, "function_name": "sort"}, "groundtruth": " _check_type(columns, {list, str})\n _check_type(descending, bool)\n _check_keys(columns, df.columns)\n df = df.sort_values(by=columns, ascending=not descending)\n df = df.reset_index(drop=True)\n return df\n"} +{"prompt": "from __future__ import annotations\n\nfrom ..checks import _check_type\nfrom ..types import Column, PandasDataFrame, PandasGroupedFrame\n\n\ndef pack(\n df: PandasDataFrame | PandasGroupedFrame, column: Column, sep: str\n) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/5", "ground_truth": " _check_type(column, str)\n _check_type(sep, str)\n order = df.obj.columns if isinstance(df, PandasGroupedFrame) else df.columns # type: ignore\n df = df.agg(**{column: (column, lambda x: x.astype(str).str.cat(sep=sep))}) # type: ignore\n df = df[[col for col in df.columns if col in order]]\n df = df.reset_index(drop=True)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "pack.py"], "context_start_lineno": 0, "lineno": 9, "function_name": "pack"}, "groundtruth": " _check_type(column, 
str)\n _check_type(sep, str)\n order = df.obj.columns if isinstance(df, PandasGroupedFrame) else df.columns # type: ignore\n df = df.agg(**{column: (column, lambda x: x.astype(str).str.cat(sep=sep))}) # type: ignore\n df = df[[col for col in df.columns if col in order]]\n df = df.reset_index(drop=True)\n return df\n"} +{"prompt": "from __future__ import annotations\n\nfrom ..checks import _check_type\nfrom ..types import Column, Func, PandasDataFrame, PandasGroupedFrame\n\n\ndef rollup(\n df: PandasDataFrame | PandasGroupedFrame,\n over: dict[Column, tuple[Column, Func]],\n) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/6", "ground_truth": " _check_type(over, dict)\n if isinstance(df, PandasGroupedFrame):\n groups = set(df.grouper.names) # type: ignore\n keys = set(over.keys())\n if groups.intersection(keys):\n raise KeyError(\"unable to overwrite group keys\")\n df = df.agg(**over)\n df = df.reset_index(drop=True)\n else:\n df = df.agg(**over) # type: ignore\n df = df.T # type: ignore\n df = df.reset_index(drop=True) # type: ignore\n df = df.fillna(method=\"ffill\") # type: ignore\n df = df.fillna(method=\"bfill\") # type: ignore\n df = df.head(1) # type: ignore\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "rollup.py"], "context_start_lineno": 0, "lineno": 10, "function_name": "rollup"}, "groundtruth": " _check_type(over, dict)\n if isinstance(df, PandasGroupedFrame):\n groups = set(df.grouper.names) # type: ignore\n keys = set(over.keys())\n if groups.intersection(keys):\n raise KeyError(\"unable to overwrite group keys\")\n df = df.agg(**over)\n df = df.reset_index(drop=True)\n else:\n df = df.agg(**over) # type: ignore\n df = df.T # type: ignore\n df = df.reset_index(drop=True) # type: ignore\n df = df.fillna(method=\"ffill\") # type: ignore\n df = df.fillna(method=\"bfill\") # type: ignore\n df = df.head(1) # type: ignore\n return df\n"} +{"prompt": "import pandas as pd # pyright: ignore[reportMissingImports]\n\nfrom ..types import PandasDataFrame\n\n\ndef append(top: PandasDataFrame, bottom: PandasDataFrame) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/7", "ground_truth": " df = pd.concat([top, bottom])\n df = df.reset_index(drop=True)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "append.py"], "context_start_lineno": 0, "lineno": 6, "function_name": "append"}, "groundtruth": " df = pd.concat([top, bottom])\n df = df.reset_index(drop=True)\n return df\n"} +{"prompt": "from __future__ import annotations\n\nimport warnings\n\nfrom ..checks import _check_type\nfrom ..types import Column, Columns, PandasDataFrame\n\n\ndef combine(\n df: PandasDataFrame, columns: Columns, into: Column, sep: str, drop: bool = True\n) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/8", "ground_truth": " _check_type(columns, list)\n _check_type(into, str)\n _check_type(sep, str)\n _check_type(drop, bool)\n into_is_in_columns = into in columns\n into_is_not_in_columns = not into_is_in_columns\n into_is_in_df_columns = into in df.columns\n if into_is_not_in_columns and into_is_in_df_columns:\n message = f\"overwriting existing column '{into}'\"\n warnings.warn(message)\n df = df.copy()\n new = df[columns].apply(lambda row: sep.join(row.values.astype(str)), axis=1)\n if drop:\n df = df.drop(columns, axis=1)\n df[into] = new\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "combine.py"], "context_start_lineno": 0, "lineno": 11, "function_name": "combine"}, 
"groundtruth": " _check_type(columns, list)\n _check_type(into, str)\n _check_type(sep, str)\n _check_type(drop, bool)\n into_is_in_columns = into in columns\n into_is_not_in_columns = not into_is_in_columns\n into_is_in_df_columns = into in df.columns\n if into_is_not_in_columns and into_is_in_df_columns:\n message = f\"overwriting existing column '{into}'\"\n warnings.warn(message)\n df = df.copy()\n new = df[columns].apply(lambda row: sep.join(row.values.astype(str)), axis=1)\n if drop:\n df = df.drop(columns, axis=1)\n df[into] = new\n return df\n"} +{"prompt": "from ..checks import _check_type\nfrom ..types import Column, NewValue, OldValue, PandasDataFrame\n\n\ndef replace(\n df: PandasDataFrame, over: dict[Column, dict[OldValue, NewValue]]\n) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/9", "ground_truth": " _check_type(over, dict)\n bad_columns = list(set(over.keys()) - set(df.columns))\n if bad_columns and len(bad_columns) == 1:\n raise KeyError(f\"column key: {bad_columns} is invalid\")\n if bad_columns and len(bad_columns) > 1:\n raise KeyError(f\"column keys: {bad_columns} are invalid\")\n df = df.replace(over)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "replace.py"], "context_start_lineno": 0, "lineno": 7, "function_name": "replace"}, "groundtruth": " _check_type(over, dict)\n bad_columns = list(set(over.keys()) - set(df.columns))\n if bad_columns and len(bad_columns) == 1:\n raise KeyError(f\"column key: {bad_columns} is invalid\")\n if bad_columns and len(bad_columns) > 1:\n raise KeyError(f\"column keys: {bad_columns} are invalid\")\n df = df.replace(over)\n return df\n"} +{"prompt": "from __future__ import annotations\n\nfrom ..checks import _check_type\nfrom ..types import LazyColumns, PandasDataFrame\n\n\ndef denix(df: PandasDataFrame, columns: LazyColumns | None = None) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/10", "ground_truth": " _check_type(columns, {list, str, None})\n columns = [columns] if isinstance(columns, str) else columns\n if isinstance(columns, list):\n bad_keys = set(columns).difference(df.columns)\n if bad_keys:\n if len(bad_keys) == 1:\n message = f\"columns argument contains invalid key {bad_keys}\"\n else:\n message = f\"columns argument contains invalid keys {bad_keys}\"\n raise KeyError(message)\n df = df.dropna(subset=columns)\n df = df.reset_index(drop=True)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "denix.py"], "context_start_lineno": 0, "lineno": 7, "function_name": "denix"}, "groundtruth": " _check_type(columns, {list, str, None})\n columns = [columns] if isinstance(columns, str) else columns\n if isinstance(columns, list):\n bad_keys = set(columns).difference(df.columns)\n if bad_keys:\n if len(bad_keys) == 1:\n message = f\"columns argument contains invalid key {bad_keys}\"\n else:\n message = f\"columns argument contains invalid keys {bad_keys}\"\n raise KeyError(message)\n df = df.dropna(subset=columns)\n df = df.reset_index(drop=True)\n return df\n"} +{"prompt": "from __future__ import annotations\n\nfrom ..checks import _check_keys, _check_type\nfrom ..types import LazyColumns, PandasDataFrame\n\n\ndef dedupe(df: PandasDataFrame, columns: LazyColumns | None = None) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/11", "ground_truth": " _check_type(columns, {list, str, None})\n _check_keys(columns, df.columns)\n df = df.drop_duplicates(subset=columns, keep=\"first\")\n df = df.reset_index(drop=True)\n 
return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "dedupe.py"], "context_start_lineno": 0, "lineno": 7, "function_name": "dedupe"}, "groundtruth": " _check_type(columns, {list, str, None})\n _check_keys(columns, df.columns)\n df = df.drop_duplicates(subset=columns, keep=\"first\")\n df = df.reset_index(drop=True)\n return df\n"} +{"prompt": "from __future__ import annotations\n\nimport warnings\n\nfrom ..checks import _check_type\nfrom ..types import Column, PandasDataFrame, PandasGroupedFrame\n\n\ndef accumulate(\n df: PandasDataFrame | PandasGroupedFrame, column: Column, into: Column\n) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/12", "ground_truth": " _check_type(column, str)\n _check_type(into, str)\n if isinstance(df, PandasDataFrame):\n into_is_not_column = into != column\n into_is_in_df_columns = into in df.columns\n if into_is_not_column and into_is_in_df_columns:\n message = f\"overwriting existing column '{into}'\"\n warnings.warn(message)\n df = df.copy()\n result = df[column].cumsum()\n if isinstance(df, PandasGroupedFrame):\n df = df.obj.copy() # type: ignore\n df[into] = result # type: ignore\n return df # type: ignore\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "accumulate.py"], "context_start_lineno": 0, "lineno": 11, "function_name": "accumulate"}, "groundtruth": " _check_type(column, str)\n _check_type(into, str)\n if isinstance(df, PandasDataFrame):\n into_is_not_column = into != column\n into_is_in_df_columns = into in df.columns\n if into_is_not_column and into_is_in_df_columns:\n message = f\"overwriting existing column '{into}'\"\n warnings.warn(message)\n df = df.copy()\n result = df[column].cumsum()\n if isinstance(df, PandasGroupedFrame):\n df = df.obj.copy() # type: ignore\n df[into] = result # type: ignore\n return df # type: ignore\n"} +{"prompt": "from __future__ import annotations\n\nimport pandas as pd # pyright: ignore[reportMissingImports]\n\nfrom ..checks import _check_type\nfrom ..types import PandasDataFrame\n\n\ndef cross(\n lhs: PandasDataFrame,\n rhs: PandasDataFrame,\n postfix: tuple[str, str] = (\"_lhs\", \"_rhs\"),\n) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/13", "ground_truth": " _check_type(postfix, tuple)\n df = pd.merge(lhs, rhs, how=\"cross\", suffixes=postfix)\n df = df.reset_index(drop=True)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "cross.py"], "context_start_lineno": 0, "lineno": 13, "function_name": "cross"}, "groundtruth": " _check_type(postfix, tuple)\n df = pd.merge(lhs, rhs, how=\"cross\", suffixes=postfix)\n df = df.reset_index(drop=True)\n return df\n"} +{"prompt": "from __future__ import annotations\n\nfrom ..checks import _check_type\nfrom ..types import PandasDataFrame\n\n\ndef sample(\n df: PandasDataFrame, rows: int | float, seed: int | None = None\n) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/14", "ground_truth": " _check_type(rows, {int, float})\n if rows >= 1:\n if isinstance(rows, float):\n raise ValueError(\"must be int if > 1\")\n df = df.sample(rows, random_state=seed)\n elif 0 < rows < 1:\n df = df.sample(frac=rows, random_state=seed)\n else:\n raise ValueError(\"must be > 0\")\n df = df.reset_index(drop=True)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "sample.py"], "context_start_lineno": 0, "lineno": 9, "function_name": "sample"}, "groundtruth": " _check_type(rows, {int, float})\n if rows >= 1:\n if isinstance(rows, float):\n 
raise ValueError(\"must be int if > 1\")\n df = df.sample(rows, random_state=seed)\n elif 0 < rows < 1:\n df = df.sample(frac=rows, random_state=seed)\n else:\n raise ValueError(\"must be > 0\")\n df = df.reset_index(drop=True)\n return df\n"} +{"prompt": "from __future__ import annotations\n\nfrom ..checks import _check_type\nfrom ..types import PandasDataFrame\n\n\ndef shuffle(df: PandasDataFrame, seed: int | None = None) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/15", "ground_truth": " _check_type(seed, {int, None})\n df = df.sample(frac=1, random_state=seed)\n df = df.reset_index(drop=True)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "shuffle.py"], "context_start_lineno": 0, "lineno": 7, "function_name": "shuffle"}, "groundtruth": " _check_type(seed, {int, None})\n df = df.sample(frac=1, random_state=seed)\n df = df.reset_index(drop=True)\n return df\n"} +{"prompt": "from __future__ import annotations\n\nfrom ..checks import _check_type\nfrom ..types import Column, PandasDataFrame\n\n\ndef unpack(df: PandasDataFrame, column: Column, sep: str) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/16", "ground_truth": " _check_type(column, str)\n _check_type(sep, str)\n df = df.assign(**{column: df[column].str.split(sep)})\n df = df.explode(column)\n df = df.reset_index(drop=True)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "unpack.py"], "context_start_lineno": 0, "lineno": 7, "function_name": "unpack"}, "groundtruth": " _check_type(column, str)\n _check_type(sep, str)\n df = df.assign(**{column: df[column].str.split(sep)})\n df = df.explode(column)\n df = df.reset_index(drop=True)\n return df\n"} +{"prompt": "from __future__ import annotations\n\nimport warnings\n\nfrom ..checks import _check_type\nfrom ..types import Column, PandasDataFrame, PandasGroupedFrame\n\n\ndef rank(\n df: PandasDataFrame | PandasGroupedFrame,\n column: Column,\n into: Column,\n descending: bool = False,\n) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/17", "ground_truth": " _check_type(column, str)\n _check_type(into, str)\n _check_type(descending, bool)\n if isinstance(df, PandasDataFrame):\n into_is_not_column = into != column\n into_is_in_df_columns = into in df.columns\n if into_is_not_column and into_is_in_df_columns:\n message = f\"overwriting existing column '{into}'\"\n warnings.warn(message)\n df = df.copy()\n result = df[column].rank(method=\"dense\", ascending=not descending)\n if isinstance(df, PandasGroupedFrame):\n df = df.obj.copy() # type: ignore\n df[into] = result # type: ignore\n return df # type: ignore\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "rank.py"], "context_start_lineno": 0, "lineno": 14, "function_name": "rank"}, "groundtruth": " _check_type(column, str)\n _check_type(into, str)\n _check_type(descending, bool)\n if isinstance(df, PandasDataFrame):\n into_is_not_column = into != column\n into_is_in_df_columns = into in df.columns\n if into_is_not_column and into_is_in_df_columns:\n message = f\"overwriting existing column '{into}'\"\n warnings.warn(message)\n df = df.copy()\n result = df[column].rank(method=\"dense\", ascending=not descending)\n if isinstance(df, PandasGroupedFrame):\n df = df.obj.copy() # type: ignore\n df[into] = result # type: ignore\n return df # type: ignore\n"} +{"prompt": "from __future__ import annotations\n\nfrom ..checks import _check_type\nfrom ..types import Direction, LazyColumns, PandasDataFrame, 
Value\n\n\ndef fill(\n df: PandasDataFrame,\n columns: LazyColumns | None = None,\n direction: Direction | None = None,\n constant: Value | None = None,\n) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/18", "ground_truth": " _check_type(columns, {list, str, None})\n _check_type(direction, {str, None})\n columns = [columns] if isinstance(columns, str) else columns\n if (direction != None) and (constant != None):\n raise ValueError(\"either direction OR constant must be None\")\n if (direction == None) and (constant == None):\n raise ValueError(\"either direction OR constant must not be None\")\n if direction != None:\n if not (direction in [\"down\", \"up\"]):\n raise ValueError(\"must be one of {'down', 'up'}\")\n method = {\"down\": \"ffill\", \"up\": \"bfill\"}.get(direction)\n value = None\n if constant != None:\n value = constant\n method = None\n df = df.copy()\n if columns:\n df[columns] = df[columns].fillna(value=value, method=method) # type: ignore\n else:\n df = df.fillna(value=value, method=method) # type: ignore\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "fill.py"], "context_start_lineno": 0, "lineno": 12, "function_name": "fill"}, "groundtruth": " _check_type(columns, {list, str, None})\n _check_type(direction, {str, None})\n columns = [columns] if isinstance(columns, str) else columns\n if (direction != None) and (constant != None):\n raise ValueError(\"either direction OR constant must be None\")\n if (direction == None) and (constant == None):\n raise ValueError(\"either direction OR constant must not be None\")\n if direction != None:\n if not (direction in [\"down\", \"up\"]):\n raise ValueError(\"must be one of {'down', 'up'}\")\n method = {\"down\": \"ffill\", \"up\": \"bfill\"}.get(direction)\n value = None\n if constant != None:\n value = constant\n method = None\n df = df.copy()\n if columns:\n df[columns] = df[columns].fillna(value=value, method=method) # type: ignore\n else:\n df = df.fillna(value=value, method=method) # type: ignore\n return df\n"} +{"prompt": "import uuid\n\nimport pandas as pd # pyright: ignore[reportMissingImports]\n\nfrom ..checks import _check_type\nfrom ..types import Column, PandasDataFrame\n\n\ndef spread(df: PandasDataFrame, column: Column, using: Column) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/19", "ground_truth": " _check_type(column, str)\n _check_type(using, str)\n if column == using:\n raise KeyError(\"column and using must be unique\")\n original_shape = df.shape[1]\n if original_shape == 2:\n temp = uuid.uuid4().hex\n df[temp] = df.groupby(column).cumcount()\n index = [col for col in df.columns if col not in [column, using]]\n df = pd.pivot_table(df, index=index, columns=[column], values=[using], aggfunc=\"first\") # type: ignore\n df.columns = [col for col in df.columns.get_level_values(1)] # type: ignore\n df = df.reset_index().rename_axis(None, axis=0)\n if original_shape == 2:\n df = df.drop(temp, axis=1) # type: ignore\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "spread.py"], "context_start_lineno": 0, "lineno": 9, "function_name": "spread"}, "groundtruth": " _check_type(column, str)\n _check_type(using, str)\n if column == using:\n raise KeyError(\"column and using must be unique\")\n original_shape = df.shape[1]\n if original_shape == 2:\n temp = uuid.uuid4().hex\n df[temp] = df.groupby(column).cumcount()\n index = [col for col in df.columns if col not in [column, using]]\n df = pd.pivot_table(df, index=index, 
columns=[column], values=[using], aggfunc=\"first\") # type: ignore\n df.columns = [col for col in df.columns.get_level_values(1)] # type: ignore\n df = df.reset_index().rename_axis(None, axis=0)\n if original_shape == 2:\n df = df.drop(temp, axis=1) # type: ignore\n return df\n"} +{"prompt": "from __future__ import annotations\n\nimport warnings\n\nimport pandas as pd # pyright: ignore[reportMissingImports]\n\nfrom ..checks import _check_type\nfrom ..types import Column, Columns, LazyColumns, PandasDataFrame, PandasGroupedFrame\n\n\ndef _melt(\n df: PandasDataFrame,\n cols_to_keep: list[str],\n cols_to_gather: list[str],\n into: tuple[str, str],\n) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/20", "ground_truth": " df = pd.melt(\n df,\n id_vars=cols_to_keep,\n value_vars=cols_to_gather,\n var_name=into[0],\n value_name=into[1],\n )\n df = df.dropna(subset=into[1]) # type: ignore\n df = df.reset_index(drop=True)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "gather.py"], "context_start_lineno": 0, "lineno": 16, "function_name": "_melt"}, "groundtruth": " df = pd.melt(\n df,\n id_vars=cols_to_keep,\n value_vars=cols_to_gather,\n var_name=into[0],\n value_name=into[1],\n )\n df = df.dropna(subset=into[1]) # type: ignore\n df = df.reset_index(drop=True)\n return df\n"} +{"prompt": "from __future__ import annotations\n\nimport warnings\n\nimport pandas as pd # pyright: ignore[reportMissingImports]\n\nfrom ..checks import _check_type\nfrom ..types import Column, Columns, LazyColumns, PandasDataFrame, PandasGroupedFrame\n\n\ndef _melt(\n df: PandasDataFrame,\n cols_to_keep: list[str],\n cols_to_gather: list[str],\n into: tuple[str, str],\n) -> PandasDataFrame:\n df = pd.melt(\n df,\n id_vars=cols_to_keep,\n value_vars=cols_to_gather,\n var_name=into[0],\n value_name=into[1],\n )\n df = df.dropna(subset=into[1]) # type: ignore\n df = df.reset_index(drop=True)\n return df\n\n\ndef _grouped_melt(df: PandasGroupedFrame, into: tuple[str, str]) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/21", "ground_truth": " cols_to_keep = df.grouper.names # type: ignore\n cols_to_gather = [col for col in df.obj.columns if col not in cols_to_keep] # type: ignore\n df = _melt(df.obj, cols_to_keep, cols_to_gather, into) # type: ignore\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "gather.py"], "context_start_lineno": 0, "lineno": 29, "function_name": "_grouped_melt"}, "groundtruth": " cols_to_keep = df.grouper.names # type: ignore\n cols_to_gather = [col for col in df.obj.columns if col not in cols_to_keep] # type: ignore\n df = _melt(df.obj, cols_to_keep, cols_to_gather, into) # type: ignore\n return df\n"} +{"prompt": "from __future__ import annotations\n\nimport pandas as pd # pyright: ignore[reportMissingImports]\n\nfrom ..checks import _check_type\nfrom ..types import Join, LazyColumns, PandasDataFrame\n\n\ndef join(\n lhs: PandasDataFrame,\n rhs: PandasDataFrame,\n on: LazyColumns,\n how: Join = \"left\",\n postfix: tuple[str, str] = (\"_lhs\", \"_rhs\"),\n) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/22", "ground_truth": " _check_type(on, {list, str})\n _check_type(how, str)\n _check_type(postfix, tuple)\n if not how in [\"left\", \"right\", \"inner\", \"full\"]:\n message = (\n \"on argument is invalid, must be one of {'left', 'right', 'inner', 'full'}\"\n )\n raise ValueError(message)\n how = \"outer\" if (how == \"full\") else how # type: ignore\n df = pd.merge(lhs, rhs, on=on, 
how=how, suffixes=postfix)\n df = df.reset_index(drop=True)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "join.py"], "context_start_lineno": 0, "lineno": 15, "function_name": "join"}, "groundtruth": " _check_type(on, {list, str})\n _check_type(how, str)\n _check_type(postfix, tuple)\n if not how in [\"left\", \"right\", \"inner\", \"full\"]:\n message = (\n \"on argument is invalid, must be one of {'left', 'right', 'inner', 'full'}\"\n )\n raise ValueError(message)\n how = \"outer\" if (how == \"full\") else how # type: ignore\n df = pd.merge(lhs, rhs, on=on, how=how, suffixes=postfix)\n df = df.reset_index(drop=True)\n return df\n"} +{"prompt": "from ..types import Func, PandasDataFrame\n\n\ndef filter(df: PandasDataFrame, func: Func) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/23", "ground_truth": " if not callable(func):\n raise TypeError(\"must be Func\")\n df = df.loc[func] # type: ignore\n df = df.reset_index(drop=True)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "filter.py"], "context_start_lineno": 0, "lineno": 4, "function_name": "filter"}, "groundtruth": " if not callable(func):\n raise TypeError(\"must be Func\")\n df = df.loc[func] # type: ignore\n df = df.reset_index(drop=True)\n return df\n"} +{"prompt": "from __future__ import annotations\n\nfrom ..checks import _check_type\nfrom ..types import PandasDataFrame, PandasGroupedFrame\n\n\ndef take(\n df: PandasDataFrame | PandasGroupedFrame, rows: int = 1, **kwargs\n) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/24", "ground_truth": " if kwargs: # compatibility: sklearn / train_test_split\n df = df.take(rows, **kwargs) # type: ignore\n df = df.reset_index(drop=True)\n return df\n _check_type(rows, int)\n if isinstance(df, PandasDataFrame):\n if rows > df.shape[0]:\n raise ValueError(\"rows argument is invalid, exceeds total size\")\n if rows == 0:\n raise ValueError(\"rows argument is invalid, must not be 0\")\n if rows <= -1:\n df = df.tail(rows * -1)\n else:\n df = df.head(rows)\n if isinstance(df, PandasGroupedFrame):\n df = df.reset_index()\n else:\n df = df.reset_index(drop=True)\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "take.py"], "context_start_lineno": 0, "lineno": 9, "function_name": "take"}, "groundtruth": " if kwargs: # compatibility: sklearn / train_test_split\n df = df.take(rows, **kwargs) # type: ignore\n df = df.reset_index(drop=True)\n return df\n _check_type(rows, int)\n if isinstance(df, PandasDataFrame):\n if rows > df.shape[0]:\n raise ValueError(\"rows argument is invalid, exceeds total size\")\n if rows == 0:\n raise ValueError(\"rows argument is invalid, must not be 0\")\n if rows <= -1:\n df = df.tail(rows * -1)\n else:\n df = df.head(rows)\n if isinstance(df, PandasGroupedFrame):\n df = df.reset_index()\n else:\n df = df.reset_index(drop=True)\n return df\n"} +{"prompt": "from __future__ import annotations\n\nfrom ..checks import _check_type\nfrom ..types import LazyColumns, PandasDataFrame, PandasGroupedFrame\n\n\ndef group(df: PandasDataFrame, by: LazyColumns) -> PandasGroupedFrame:", "metadata": {"task_id": "maxhumber--redframes/25", "ground_truth": " _check_type(by, {list, str})\n gdf = df.groupby(by, as_index=False, sort=False)\n return gdf\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "group.py"], "context_start_lineno": 0, "lineno": 7, "function_name": "group"}, "groundtruth": " _check_type(by, {list, str})\n gdf = df.groupby(by, 
as_index=False, sort=False)\n return gdf\n"} +{"prompt": "import pandas as pd # pyright: ignore[reportMissingImports]\n\nfrom ..checks import _check_type\nfrom ..types import LazyColumns, PandasDataFrame\n\n\ndef select(df: PandasDataFrame, columns: LazyColumns) -> PandasDataFrame:", "metadata": {"task_id": "maxhumber--redframes/26", "ground_truth": " _check_type(columns, {list, str})\n columns = [columns] if isinstance(columns, str) else columns\n if len(set(columns)) != len(columns):\n raise KeyError(f\"column keys must be unique\")\n bad_columns = list(set(columns) - set(df.columns))\n if bad_columns and len(bad_columns) == 1:\n raise KeyError(f\"column key: {bad_columns} is invalid\")\n if bad_columns and len(bad_columns) > 1:\n raise KeyError(f\"column keys: {bad_columns} are invalid\")\n df = df[columns]\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "verbs", "select.py"], "context_start_lineno": 0, "lineno": 7, "function_name": "select"}, "groundtruth": " _check_type(columns, {list, str})\n columns = [columns] if isinstance(columns, str) else columns\n if len(set(columns)) != len(columns):\n raise KeyError(f\"column keys must be unique\")\n bad_columns = list(set(columns) - set(df.columns))\n if bad_columns and len(bad_columns) == 1:\n raise KeyError(f\"column key: {bad_columns} is invalid\")\n if bad_columns and len(bad_columns) > 1:\n raise KeyError(f\"column keys: {bad_columns} are invalid\")\n df = df[columns]\n return df\n"} +{"prompt": "from ..checks import _check_file, _check_type\nfrom ..core import DataFrame\n\n\ndef save(df: DataFrame, path: str, **kwargs) -> None:\n \"\"\"Save a rf.DataFrame to a csv file (opposite of `load`)\n\n Example:\n\n ```python\n rf.save(df, \"example.csv\")\n ```\n \"\"\"", "metadata": {"task_id": "maxhumber--redframes/27", "ground_truth": " _check_type(df, DataFrame)\n _check_type(path, str)\n _check_file(path)\n df._data.to_csv(path, index=False, **kwargs)\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "io", "save.py"], "context_start_lineno": 0, "lineno": 13, "function_name": "save"}, "groundtruth": " _check_type(df, DataFrame)\n _check_type(path, str)\n _check_file(path)\n df._data.to_csv(path, index=False, **kwargs)\n"} +{"prompt": "import pandas as pd # pyright: ignore[reportMissingImports]\n\nfrom redframes.types import PandasDataFrame\n\nfrom ..checks import _check_columns, _check_file, _check_index, _check_type\nfrom ..core import DataFrame, _wrap\n\n\ndef load(path: str, **kwargs) -> DataFrame:\n \"\"\"Load a csv file into a rf.DataFrame (opposite of `save`)\n\n Example:\n\n ```python\n df = rf.load(\"example.csv\")\n ```\n \"\"\"", "metadata": {"task_id": "maxhumber--redframes/28", "ground_truth": " _check_type(path, str)\n _check_file(path)\n data: PandasDataFrame = pd.read_csv(path, **kwargs) # type: ignore\n _check_index(data)\n _check_columns(data)\n return _wrap(data)\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "io", "load.py"], "context_start_lineno": 0, "lineno": 17, "function_name": "load"}, "groundtruth": " _check_type(path, str)\n _check_file(path)\n data: PandasDataFrame = pd.read_csv(path, **kwargs) # type: ignore\n _check_index(data)\n _check_columns(data)\n return _wrap(data)\n"} +{"prompt": "from __future__ import annotations\n\nfrom ..checks import _check_columns, _check_index, _check_type\nfrom ..core import DataFrame\nfrom ..types import PandasDataFrame\n\n\ndef unwrap(rdf: DataFrame) -> PandasDataFrame:\n \"\"\"Convert a rf.DataFrame into a pd.DataFrame (opposite of 
`wrap`)\n\n Example:\n\n ```python\n rdf = rf.DataFrame({\"foo\": range(10)})\n pdf = rf.unwrap(rdf)\n ```\n \"\"\"\n _check_type(rdf, DataFrame)\n return rdf._data.copy()\n\n\ndef wrap(pdf: PandasDataFrame) -> DataFrame:\n \"\"\"Convert a pd.DataFrame into a rf.DataFrame (opposite of `unwrap`)\n\n Example:\n\n ```python\n pdf = pd.DataFrame({\"foo\": range(10)})\n rdf = rf.wrap(pdf)\n ```\n \"\"\"", "metadata": {"task_id": "maxhumber--redframes/29", "ground_truth": " _check_type(pdf, PandasDataFrame)\n _check_index(pdf)\n _check_columns(pdf)\n rdf = DataFrame()\n rdf._data = pdf.copy()\n return rdf\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "io", "convert.py"], "context_start_lineno": 0, "lineno": 31, "function_name": "wrap"}, "groundtruth": " _check_type(pdf, PandasDataFrame)\n _check_index(pdf)\n _check_columns(pdf)\n rdf = DataFrame()\n rdf._data = pdf.copy()\n return rdf\n"} +{"prompt": "from __future__ import annotations\n\nimport pprint\nimport warnings\n\nfrom .checks import _check_type\nfrom .types import (\n Any,\n Column,\n Columns,\n DateTime,\n Direction,\n Func,\n Join,\n LazyColumns,\n NewColumn,\n NewValue,\n NumpyArray,\n NumpyType,\n OldColumn,\n OldValue,\n PandasDataFrame,\n PandasGroupedFrame,\n Value,\n Values,\n)\nfrom .verbs import (\n accumulate,\n append,\n combine,\n cross,\n dedupe,\n denix,\n drop,\n fill,\n filter,\n gather,\n group,\n join,\n mutate,\n pack,\n rank,\n rename,\n replace,\n rollup,\n sample,\n select,\n shuffle,\n sort,\n split,\n spread,\n take,\n unpack,\n)\n\n\ndef _wrap(data: PandasDataFrame) -> DataFrame:\n \"\"\"Unsafe version of redframes.io.wrap()\"\"\"", "metadata": {"task_id": "maxhumber--redframes/30", "ground_truth": " df = DataFrame()\n df._data = data\n return df\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "core.py"], "context_start_lineno": 0, "lineno": 58, "function_name": "_wrap"}, "groundtruth": " df = DataFrame()\n df._data = data\n return df\n"} +{"prompt": "from __future__ import annotations\n\nimport pprint\nimport warnings\n\nfrom .checks import _check_type\nfrom .types import (\n Any,\n Column,\n Columns,\n DateTime,\n Direction,\n Func,\n Join,\n LazyColumns,\n NewColumn,\n NewValue,\n NumpyArray,\n NumpyType,\n OldColumn,\n OldValue,\n PandasDataFrame,\n PandasGroupedFrame,\n Value,\n Values,\n)\nfrom .verbs import (\n accumulate,\n append,\n combine,\n cross,\n dedupe,\n denix,\n drop,\n fill,\n filter,\n gather,\n group,\n join,\n mutate,\n pack,\n rank,\n rename,\n replace,\n rollup,\n sample,\n select,\n shuffle,\n sort,\n split,\n spread,\n take,\n unpack,\n)\n\n\ndef _wrap(data: PandasDataFrame) -> DataFrame:\n \"\"\"Unsafe version of redframes.io.wrap()\"\"\"\n df = DataFrame()\n df._data = data\n return df\n\n\nclass _TakeMixin:\n def __init__(self, data: PandasDataFrame | PandasGroupedFrame) -> None:\n self._data = data\n\n def take(self, rows: int, **kwargs) -> DataFrame:\n \"\"\"Take any number of rows (from the top/bottom)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\"foo\": range(10)})\n ```\n | foo |\n |------:|\n | 0 |\n | 1 |\n | 2 |\n | 3 |\n | 4 |\n | 5 |\n | 6 |\n | 7 |\n | 8 |\n | 9 |\n\n From \"head\":\n\n ```python\n df.take(1)\n ```\n | foo |\n |------:|\n | 0 |\n\n From \"tail\":\n\n ```python\n df.take(-2)\n ```\n | foo |\n |------:|\n | 8 |\n | 9 |\n \"\"\"\n return _wrap(take(self._data, rows, **kwargs))\n\n\nclass _InterchangeMixin(_TakeMixin):\n def __init__(self, data: PandasDataFrame) -> None:\n self._data = data\n\n def __array__(self) -> NumpyArray:\n 
return self._data.__array__()\n\n def __dataframe__(self, nan_as_null=False, allow_copy=True) -> \"PandasDataFrameXchg\": # type: ignore\n return self._data.__dataframe__(nan_as_null, allow_copy)\n\n def __len__(self) -> int:\n return self._data.__len__()\n\n @property\n def iloc(self):\n return self._data.iloc\n\n\nclass _CommonMixin(_TakeMixin):\n def __init__(self, data: PandasDataFrame | PandasGroupedFrame) -> None:\n self._data = data\n\n def accumulate(self, column: Column, into: Column) -> DataFrame:\n \"\"\"Run a cumulative sum over a column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3, 4]})\n ```\n | foo |\n |------:|\n | 1 |\n | 2 |\n | 3 |\n | 4 |\n\n ```python\n df.accumulate(\"foo\", into=\"cumsum\")\n ```\n | foo | cumsum |\n |------:|---------:|\n | 1 | 1 |\n | 2 | 3 |\n | 3 | 6 |\n | 4 | 10 |\n \"\"\"\n return _wrap(accumulate(self._data, column, into))\n\n def gather(\n self,\n columns: Columns | None = None,\n beside: LazyColumns | None = None,\n into: tuple[Column, Column] = (\"variable\", \"value\"),\n ):\n \"\"\"Gather columns into rows (opposite of spread)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\n \"foo\": [1, 2, 1, 2],\n \"bar\": [\"A\", \"B\", \"C\", \"D\"],\n \"baz\": [\"!\", \"@\", \"#\", \"$\"],\n \"jaz\": range(4)\n })\n ```\n | foo | bar | baz | jaz |\n |------:|:------|:------|------:|\n | 1 | A | ! | 0 |\n | 2 | B | @ | 1 |\n | 1 | C | # | 2 |\n | 2 | D | $ | 3 |\n\n All columns:\n\n ```python\n df.gather()\n ```\n | variable | value |\n |:-----------|:--------|\n | foo | 1 |\n | foo | 2 |\n | foo | 1 |\n | foo | 2 |\n | bar | A |\n | bar | B |\n | bar | C |\n | bar | D |\n | baz | ! |\n | baz | @ |\n | baz | # |\n | baz | $ |\n | jaz | 0 |\n | jaz | 1 |\n | jaz | 2 |\n | jaz | 3 |\n\n Multiple columns:\n\n ```python\n df.gather([\"foo\", \"bar\"], into=(\"var\", \"val\"))\n ```\n | baz | jaz | var | val |\n |:------|------:|:------|:------|\n | ! | 0 | foo | 1 |\n | @ | 1 | foo | 2 |\n | # | 2 | foo | 1 |\n | $ | 3 | foo | 2 |\n | ! | 0 | bar | A |\n | @ | 1 | bar | B |\n | # | 2 | bar | C |\n | $ | 3 | bar | D |\n\n All columns beside:\n\n ```python\n df.group([\"foo\", \"bar\"]).gather(into=(\"variable\", \"value\"))\n ```\n | foo | bar | variable | value |\n |------:|:------|:-----------|:--------|\n | 1 | A | baz | ! 
|\n | 2 | B | baz | @ |\n | 1 | C | baz | # |\n | 2 | D | baz | $ |\n | 1 | A | jaz | 0 |\n | 2 | B | jaz | 1 |\n | 1 | C | jaz | 2 |\n | 2 | D | jaz | 3 |\n \"\"\"\n return _wrap(gather(self._data, columns, beside, into))\n\n def pack(self, column: Column, sep: str) -> DataFrame:\n \"\"\"Collate and concatenate row values for a target column (opposite of unpack)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\n \"foo\": [\"A\", \"A\", \"B\", \"A\", \"B\", \"C\"],\n \"bar\": [1, 2, 3, 4, 5, 6]\n })\n ```\n | foo | bar |\n |:------|------:|\n | A | 1 |\n | A | 2 |\n | B | 3 |\n | A | 4 |\n | B | 5 |\n | C | 6 |\n\n Pack all rows:\n\n ```python\n df.pack(\"foo\", sep=\"+\")\n ```\n | foo |\n |:------------|\n | A+A+B+A+B+C |\n\n Pack rows by Group:\n\n ```python\n df.group(\"foo\").pack(\"bar\", sep=\"|\")\n ```\n | foo | bar |\n |:------|:------|\n | A | 1|2|4 |\n | B | 3|5 |\n | C | 6 |\n \"\"\"\n return _wrap(pack(self._data, column, sep))\n\n def rank(\n self,\n column: Column,\n into: Column,\n descending: bool = False,\n ) -> DataFrame:\n \"\"\"Rank order values in a column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [2, 3, 3, 99, 1000, 1, -6, 4]})\n ```\n | foo |\n |------:|\n | 2 |\n | 3 |\n | 3 |\n | 99 |\n | 1000 |\n | 1 |\n | -6 |\n | 4 |\n\n ```python\n df.rank(\"foo\", into=\"rank\", descending=True)\n ```\n | foo | rank |\n |------:|-------:|\n | 2 | 5 |\n | 3 | 4 |\n | 3 | 4 |\n | 99 | 2 |\n | 1000 | 1 |\n | 1 | 6 |\n | -6 | 7 |\n | 4 | 3 |\n \"\"\"\n return _wrap(rank(self._data, column, into, descending))\n\n def rollup(self, over: dict[Column, tuple[Column, Func]]) -> DataFrame:\n \"\"\"Apply summary functions and/or statistics to target columns\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3, 4, 5], \"bar\": [99, 100, 1, -5, 2]})\n ```\n | foo | bar |\n |------:|------:|\n | 1 | 99 |\n | 2 | 100 |\n | 3 | 1 |\n | 4 | -5 |\n | 5 | 2 |\n\n ```python\n df.rollup({\n \"fcount\": (\"foo\", rf.stat.count),\n \"fmean\": (\"foo\", rf.stat.mean),\n \"fsum\": (\"foo\", rf.stat.sum),\n \"fmax\": (\"foo\", rf.stat.max),\n \"bmedian\": (\"bar\", rf.stat.median),\n \"bmin\": (\"bar\", rf.stat.min),\n \"bstd\": (\"bar\", rf.stat.std)\n })\n ```\n | fcount | fmean | fsum | fmax | bmedian | bmin | bstd |\n |---------:|--------:|-------:|-------:|----------:|-------:|-------:|\n | 5 | 3 | 15 | 5 | 2 | -5 | 54.93 |\n \"\"\"\n return _wrap(rollup(self._data, over))\n\n def summarize(self, over: dict[Column, tuple[Column, Func]]) -> DataFrame:", "metadata": {"task_id": "maxhumber--redframes/31", "ground_truth": " message = \"Marked for removal, please use `rollup` instead\"\n warnings.warn(message, FutureWarning)\n return self.rollup(over)\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "core.py"], "context_start_lineno": 0, "lineno": 358, "function_name": "summarize"}, "groundtruth": " message = \"Marked for removal, please use `rollup` instead\"\n warnings.warn(message, FutureWarning)\n return self.rollup(over)\n"} +{"prompt": "from __future__ import annotations\n\nimport pprint\nimport warnings\n\nfrom .checks import _check_type\nfrom .types import (\n Any,\n Column,\n Columns,\n DateTime,\n Direction,\n Func,\n Join,\n LazyColumns,\n NewColumn,\n NewValue,\n NumpyArray,\n NumpyType,\n OldColumn,\n OldValue,\n PandasDataFrame,\n PandasGroupedFrame,\n Value,\n Values,\n)\nfrom .verbs import (\n accumulate,\n append,\n combine,\n cross,\n dedupe,\n denix,\n drop,\n fill,\n filter,\n gather,\n group,\n join,\n mutate,\n pack,\n rank,\n rename,\n 
replace,\n rollup,\n sample,\n select,\n shuffle,\n sort,\n split,\n spread,\n take,\n unpack,\n)\n\n\ndef _wrap(data: PandasDataFrame) -> DataFrame:\n \"\"\"Unsafe version of redframes.io.wrap()\"\"\"\n df = DataFrame()\n df._data = data\n return df\n\n\nclass _TakeMixin:\n def __init__(self, data: PandasDataFrame | PandasGroupedFrame) -> None:\n self._data = data\n\n def take(self, rows: int, **kwargs) -> DataFrame:\n \"\"\"Take any number of rows (from the top/bottom)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\"foo\": range(10)})\n ```\n | foo |\n |------:|\n | 0 |\n | 1 |\n | 2 |\n | 3 |\n | 4 |\n | 5 |\n | 6 |\n | 7 |\n | 8 |\n | 9 |\n\n From \"head\":\n\n ```python\n df.take(1)\n ```\n | foo |\n |------:|\n | 0 |\n\n From \"tail\":\n\n ```python\n df.take(-2)\n ```\n | foo |\n |------:|\n | 8 |\n | 9 |\n \"\"\"\n return _wrap(take(self._data, rows, **kwargs))\n\n\nclass _InterchangeMixin(_TakeMixin):\n def __init__(self, data: PandasDataFrame) -> None:\n self._data = data\n\n def __array__(self) -> NumpyArray:\n return self._data.__array__()\n\n def __dataframe__(self, nan_as_null=False, allow_copy=True) -> \"PandasDataFrameXchg\": # type: ignore\n return self._data.__dataframe__(nan_as_null, allow_copy)\n\n def __len__(self) -> int:\n return self._data.__len__()\n\n @property\n def iloc(self):\n return self._data.iloc\n\n\nclass _CommonMixin(_TakeMixin):\n def __init__(self, data: PandasDataFrame | PandasGroupedFrame) -> None:\n self._data = data\n\n def accumulate(self, column: Column, into: Column) -> DataFrame:\n \"\"\"Run a cumulative sum over a column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3, 4]})\n ```\n | foo |\n |------:|\n | 1 |\n | 2 |\n | 3 |\n | 4 |\n\n ```python\n df.accumulate(\"foo\", into=\"cumsum\")\n ```\n | foo | cumsum |\n |------:|---------:|\n | 1 | 1 |\n | 2 | 3 |\n | 3 | 6 |\n | 4 | 10 |\n \"\"\"\n return _wrap(accumulate(self._data, column, into))\n\n def gather(\n self,\n columns: Columns | None = None,\n beside: LazyColumns | None = None,\n into: tuple[Column, Column] = (\"variable\", \"value\"),\n ):\n \"\"\"Gather columns into rows (opposite of spread)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\n \"foo\": [1, 2, 1, 2],\n \"bar\": [\"A\", \"B\", \"C\", \"D\"],\n \"baz\": [\"!\", \"@\", \"#\", \"$\"],\n \"jaz\": range(4)\n })\n ```\n | foo | bar | baz | jaz |\n |------:|:------|:------|------:|\n | 1 | A | ! | 0 |\n | 2 | B | @ | 1 |\n | 1 | C | # | 2 |\n | 2 | D | $ | 3 |\n\n All columns:\n\n ```python\n df.gather()\n ```\n | variable | value |\n |:-----------|:--------|\n | foo | 1 |\n | foo | 2 |\n | foo | 1 |\n | foo | 2 |\n | bar | A |\n | bar | B |\n | bar | C |\n | bar | D |\n | baz | ! |\n | baz | @ |\n | baz | # |\n | baz | $ |\n | jaz | 0 |\n | jaz | 1 |\n | jaz | 2 |\n | jaz | 3 |\n\n Multiple columns:\n\n ```python\n df.gather([\"foo\", \"bar\"], into=(\"var\", \"val\"))\n ```\n | baz | jaz | var | val |\n |:------|------:|:------|:------|\n | ! | 0 | foo | 1 |\n | @ | 1 | foo | 2 |\n | # | 2 | foo | 1 |\n | $ | 3 | foo | 2 |\n | ! | 0 | bar | A |\n | @ | 1 | bar | B |\n | # | 2 | bar | C |\n | $ | 3 | bar | D |\n\n All columns beside:\n\n ```python\n df.group([\"foo\", \"bar\"]).gather(into=(\"variable\", \"value\"))\n ```\n | foo | bar | variable | value |\n |------:|:------|:-----------|:--------|\n | 1 | A | baz | ! 
|\n | 2 | B | baz | @ |\n | 1 | C | baz | # |\n | 2 | D | baz | $ |\n | 1 | A | jaz | 0 |\n | 2 | B | jaz | 1 |\n | 1 | C | jaz | 2 |\n | 2 | D | jaz | 3 |\n \"\"\"\n return _wrap(gather(self._data, columns, beside, into))\n\n def pack(self, column: Column, sep: str) -> DataFrame:\n \"\"\"Collate and concatenate row values for a target column (opposite of unpack)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\n \"foo\": [\"A\", \"A\", \"B\", \"A\", \"B\", \"C\"],\n \"bar\": [1, 2, 3, 4, 5, 6]\n })\n ```\n | foo | bar |\n |:------|------:|\n | A | 1 |\n | A | 2 |\n | B | 3 |\n | A | 4 |\n | B | 5 |\n | C | 6 |\n\n Pack all rows:\n\n ```python\n df.pack(\"foo\", sep=\"+\")\n ```\n | foo |\n |:------------|\n | A+A+B+A+B+C |\n\n Pack rows by Group:\n\n ```python\n df.group(\"foo\").pack(\"bar\", sep=\"|\")\n ```\n | foo | bar |\n |:------|:------|\n | A | 1|2|4 |\n | B | 3|5 |\n | C | 6 |\n \"\"\"\n return _wrap(pack(self._data, column, sep))\n\n def rank(\n self,\n column: Column,\n into: Column,\n descending: bool = False,\n ) -> DataFrame:\n \"\"\"Rank order values in a column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [2, 3, 3, 99, 1000, 1, -6, 4]})\n ```\n | foo |\n |------:|\n | 2 |\n | 3 |\n | 3 |\n | 99 |\n | 1000 |\n | 1 |\n | -6 |\n | 4 |\n\n ```python\n df.rank(\"foo\", into=\"rank\", descending=True)\n ```\n | foo | rank |\n |------:|-------:|\n | 2 | 5 |\n | 3 | 4 |\n | 3 | 4 |\n | 99 | 2 |\n | 1000 | 1 |\n | 1 | 6 |\n | -6 | 7 |\n | 4 | 3 |\n \"\"\"\n return _wrap(rank(self._data, column, into, descending))\n\n def rollup(self, over: dict[Column, tuple[Column, Func]]) -> DataFrame:\n \"\"\"Apply summary functions and/or statistics to target columns\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3, 4, 5], \"bar\": [99, 100, 1, -5, 2]})\n ```\n | foo | bar |\n |------:|------:|\n | 1 | 99 |\n | 2 | 100 |\n | 3 | 1 |\n | 4 | -5 |\n | 5 | 2 |\n\n ```python\n df.rollup({\n \"fcount\": (\"foo\", rf.stat.count),\n \"fmean\": (\"foo\", rf.stat.mean),\n \"fsum\": (\"foo\", rf.stat.sum),\n \"fmax\": (\"foo\", rf.stat.max),\n \"bmedian\": (\"bar\", rf.stat.median),\n \"bmin\": (\"bar\", rf.stat.min),\n \"bstd\": (\"bar\", rf.stat.std)\n })\n ```\n | fcount | fmean | fsum | fmax | bmedian | bmin | bstd |\n |---------:|--------:|-------:|-------:|----------:|-------:|-------:|\n | 5 | 3 | 15 | 5 | 2 | -5 | 54.93 |\n \"\"\"\n return _wrap(rollup(self._data, over))\n\n def summarize(self, over: dict[Column, tuple[Column, Func]]) -> DataFrame:\n message = \"Marked for removal, please use `rollup` instead\"\n warnings.warn(message, FutureWarning)\n return self.rollup(over)\n\n\nclass GroupedFrame(_CommonMixin):\n \"\"\"GroupedFrame compatible with: `accumulate`, `gather`, `pack`, `rank`, `rollup`, `take`\"\"\"\n\n def __repr__(self) -> str:\n return self._data.obj.__repr__() # type: ignore\n\n def _repr_html_(self) -> str:\n return self._data.obj.to_html(index=True) # type: ignore\n\n\nclass DataFrame(_CommonMixin, _InterchangeMixin):\n def __init__(self, data: dict[Column, Values] | None = None) -> None:\n \"\"\"Initialize a DataFrame with a standard dictionary\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n ```\n | foo | bar |\n |------:|:------|\n | 1 | A |\n | 2 | B |\n \"\"\"", "metadata": {"task_id": "maxhumber--redframes/32", "ground_truth": " _check_type(data, {dict, None})\n if not data:\n self._data = PandasDataFrame()\n if isinstance(data, dict):\n self._data = PandasDataFrame(data)\n", "fpath_tuple": 
["maxhumber_redframes", "redframes", "core.py"], "context_start_lineno": 0, "lineno": 387, "function_name": "__init__"}, "groundtruth": " _check_type(data, {dict, None})\n if not data:\n self._data = PandasDataFrame()\n if isinstance(data, dict):\n self._data = PandasDataFrame(data)\n"} +{"prompt": "from __future__ import annotations\n\nimport pprint\nimport warnings\n\nfrom .checks import _check_type\nfrom .types import (\n Any,\n Column,\n Columns,\n DateTime,\n Direction,\n Func,\n Join,\n LazyColumns,\n NewColumn,\n NewValue,\n NumpyArray,\n NumpyType,\n OldColumn,\n OldValue,\n PandasDataFrame,\n PandasGroupedFrame,\n Value,\n Values,\n)\nfrom .verbs import (\n accumulate,\n append,\n combine,\n cross,\n dedupe,\n denix,\n drop,\n fill,\n filter,\n gather,\n group,\n join,\n mutate,\n pack,\n rank,\n rename,\n replace,\n rollup,\n sample,\n select,\n shuffle,\n sort,\n split,\n spread,\n take,\n unpack,\n)\n\n\ndef _wrap(data: PandasDataFrame) -> DataFrame:\n \"\"\"Unsafe version of redframes.io.wrap()\"\"\"\n df = DataFrame()\n df._data = data\n return df\n\n\nclass _TakeMixin:\n def __init__(self, data: PandasDataFrame | PandasGroupedFrame) -> None:\n self._data = data\n\n def take(self, rows: int, **kwargs) -> DataFrame:\n \"\"\"Take any number of rows (from the top/bottom)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\"foo\": range(10)})\n ```\n | foo |\n |------:|\n | 0 |\n | 1 |\n | 2 |\n | 3 |\n | 4 |\n | 5 |\n | 6 |\n | 7 |\n | 8 |\n | 9 |\n\n From \"head\":\n\n ```python\n df.take(1)\n ```\n | foo |\n |------:|\n | 0 |\n\n From \"tail\":\n\n ```python\n df.take(-2)\n ```\n | foo |\n |------:|\n | 8 |\n | 9 |\n \"\"\"\n return _wrap(take(self._data, rows, **kwargs))\n\n\nclass _InterchangeMixin(_TakeMixin):\n def __init__(self, data: PandasDataFrame) -> None:\n self._data = data\n\n def __array__(self) -> NumpyArray:\n return self._data.__array__()\n\n def __dataframe__(self, nan_as_null=False, allow_copy=True) -> \"PandasDataFrameXchg\": # type: ignore\n return self._data.__dataframe__(nan_as_null, allow_copy)\n\n def __len__(self) -> int:\n return self._data.__len__()\n\n @property\n def iloc(self):\n return self._data.iloc\n\n\nclass _CommonMixin(_TakeMixin):\n def __init__(self, data: PandasDataFrame | PandasGroupedFrame) -> None:\n self._data = data\n\n def accumulate(self, column: Column, into: Column) -> DataFrame:\n \"\"\"Run a cumulative sum over a column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3, 4]})\n ```\n | foo |\n |------:|\n | 1 |\n | 2 |\n | 3 |\n | 4 |\n\n ```python\n df.accumulate(\"foo\", into=\"cumsum\")\n ```\n | foo | cumsum |\n |------:|---------:|\n | 1 | 1 |\n | 2 | 3 |\n | 3 | 6 |\n | 4 | 10 |\n \"\"\"\n return _wrap(accumulate(self._data, column, into))\n\n def gather(\n self,\n columns: Columns | None = None,\n beside: LazyColumns | None = None,\n into: tuple[Column, Column] = (\"variable\", \"value\"),\n ):\n \"\"\"Gather columns into rows (opposite of spread)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\n \"foo\": [1, 2, 1, 2],\n \"bar\": [\"A\", \"B\", \"C\", \"D\"],\n \"baz\": [\"!\", \"@\", \"#\", \"$\"],\n \"jaz\": range(4)\n })\n ```\n | foo | bar | baz | jaz |\n |------:|:------|:------|------:|\n | 1 | A | ! | 0 |\n | 2 | B | @ | 1 |\n | 1 | C | # | 2 |\n | 2 | D | $ | 3 |\n\n All columns:\n\n ```python\n df.gather()\n ```\n | variable | value |\n |:-----------|:--------|\n | foo | 1 |\n | foo | 2 |\n | foo | 1 |\n | foo | 2 |\n | bar | A |\n | bar | B |\n | bar | C |\n | bar | D |\n | baz | ! 
|\n | baz | @ |\n | baz | # |\n | baz | $ |\n | jaz | 0 |\n | jaz | 1 |\n | jaz | 2 |\n | jaz | 3 |\n\n Multiple columns:\n\n ```python\n df.gather([\"foo\", \"bar\"], into=(\"var\", \"val\"))\n ```\n | baz | jaz | var | val |\n |:------|------:|:------|:------|\n | ! | 0 | foo | 1 |\n | @ | 1 | foo | 2 |\n | # | 2 | foo | 1 |\n | $ | 3 | foo | 2 |\n | ! | 0 | bar | A |\n | @ | 1 | bar | B |\n | # | 2 | bar | C |\n | $ | 3 | bar | D |\n\n All columns beside:\n\n ```python\n df.group([\"foo\", \"bar\"]).gather(into=(\"variable\", \"value\"))\n ```\n | foo | bar | variable | value |\n |------:|:------|:-----------|:--------|\n | 1 | A | baz | ! |\n | 2 | B | baz | @ |\n | 1 | C | baz | # |\n | 2 | D | baz | $ |\n | 1 | A | jaz | 0 |\n | 2 | B | jaz | 1 |\n | 1 | C | jaz | 2 |\n | 2 | D | jaz | 3 |\n \"\"\"\n return _wrap(gather(self._data, columns, beside, into))\n\n def pack(self, column: Column, sep: str) -> DataFrame:\n \"\"\"Collate and concatenate row values for a target column (opposite of unpack)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\n \"foo\": [\"A\", \"A\", \"B\", \"A\", \"B\", \"C\"],\n \"bar\": [1, 2, 3, 4, 5, 6]\n })\n ```\n | foo | bar |\n |:------|------:|\n | A | 1 |\n | A | 2 |\n | B | 3 |\n | A | 4 |\n | B | 5 |\n | C | 6 |\n\n Pack all rows:\n\n ```python\n df.pack(\"foo\", sep=\"+\")\n ```\n | foo |\n |:------------|\n | A+A+B+A+B+C |\n\n Pack rows by Group:\n\n ```python\n df.group(\"foo\").pack(\"bar\", sep=\"|\")\n ```\n | foo | bar |\n |:------|:------|\n | A | 1|2|4 |\n | B | 3|5 |\n | C | 6 |\n \"\"\"\n return _wrap(pack(self._data, column, sep))\n\n def rank(\n self,\n column: Column,\n into: Column,\n descending: bool = False,\n ) -> DataFrame:\n \"\"\"Rank order values in a column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [2, 3, 3, 99, 1000, 1, -6, 4]})\n ```\n | foo |\n |------:|\n | 2 |\n | 3 |\n | 3 |\n | 99 |\n | 1000 |\n | 1 |\n | -6 |\n | 4 |\n\n ```python\n df.rank(\"foo\", into=\"rank\", descending=True)\n ```\n | foo | rank |\n |------:|-------:|\n | 2 | 5 |\n | 3 | 4 |\n | 3 | 4 |\n | 99 | 2 |\n | 1000 | 1 |\n | 1 | 6 |\n | -6 | 7 |\n | 4 | 3 |\n \"\"\"\n return _wrap(rank(self._data, column, into, descending))\n\n def rollup(self, over: dict[Column, tuple[Column, Func]]) -> DataFrame:\n \"\"\"Apply summary functions and/or statistics to target columns\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3, 4, 5], \"bar\": [99, 100, 1, -5, 2]})\n ```\n | foo | bar |\n |------:|------:|\n | 1 | 99 |\n | 2 | 100 |\n | 3 | 1 |\n | 4 | -5 |\n | 5 | 2 |\n\n ```python\n df.rollup({\n \"fcount\": (\"foo\", rf.stat.count),\n \"fmean\": (\"foo\", rf.stat.mean),\n \"fsum\": (\"foo\", rf.stat.sum),\n \"fmax\": (\"foo\", rf.stat.max),\n \"bmedian\": (\"bar\", rf.stat.median),\n \"bmin\": (\"bar\", rf.stat.min),\n \"bstd\": (\"bar\", rf.stat.std)\n })\n ```\n | fcount | fmean | fsum | fmax | bmedian | bmin | bstd |\n |---------:|--------:|-------:|-------:|----------:|-------:|-------:|\n | 5 | 3 | 15 | 5 | 2 | -5 | 54.93 |\n \"\"\"\n return _wrap(rollup(self._data, over))\n\n def summarize(self, over: dict[Column, tuple[Column, Func]]) -> DataFrame:\n message = \"Marked for removal, please use `rollup` instead\"\n warnings.warn(message, FutureWarning)\n return self.rollup(over)\n\n\nclass GroupedFrame(_CommonMixin):\n \"\"\"GroupedFrame compatible with: `accumulate`, `gather`, `pack`, `rank`, `rollup`, `take`\"\"\"\n\n def __repr__(self) -> str:\n return self._data.obj.__repr__() # type: ignore\n\n def _repr_html_(self) -> 
str:\n return self._data.obj.to_html(index=True) # type: ignore\n\n\nclass DataFrame(_CommonMixin, _InterchangeMixin):\n def __init__(self, data: dict[Column, Values] | None = None) -> None:\n \"\"\"Initialize a DataFrame with a standard dictionary\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n ```\n | foo | bar |\n |------:|:------|\n | 1 | A |\n | 2 | B |\n \"\"\"\n _check_type(data, {dict, None})\n if not data:\n self._data = PandasDataFrame()\n if isinstance(data, dict):\n self._data = PandasDataFrame(data)\n\n def __eq__(self, rhs: Any) -> bool:\n \"\"\"Check if two DataFrames are equal to each other\n\n Example:\n\n ```python\n adf = rf.DataFrame({\"foo\": [1]})\n bdf = rf.DataFrame({\"bar\": [1]})\n cdf = rf.DataFrame({\"foo\": [1]})\n print(adf == bdf)\n print(adf == cdf)\n # False\n # True\n ```\n \"\"\"", "metadata": {"task_id": "maxhumber--redframes/33", "ground_truth": " if not isinstance(rhs, DataFrame):\n return False\n return self._data.equals(rhs._data)\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "core.py"], "context_start_lineno": 0, "lineno": 408, "function_name": "__eq__"}, "groundtruth": " if not isinstance(rhs, DataFrame):\n return False\n return self._data.equals(rhs._data)\n"} +{"prompt": "from __future__ import annotations\n\nimport pprint\nimport warnings\n\nfrom .checks import _check_type\nfrom .types import (\n Any,\n Column,\n Columns,\n DateTime,\n Direction,\n Func,\n Join,\n LazyColumns,\n NewColumn,\n NewValue,\n NumpyArray,\n NumpyType,\n OldColumn,\n OldValue,\n PandasDataFrame,\n PandasGroupedFrame,\n Value,\n Values,\n)\nfrom .verbs import (\n accumulate,\n append,\n combine,\n cross,\n dedupe,\n denix,\n drop,\n fill,\n filter,\n gather,\n group,\n join,\n mutate,\n pack,\n rank,\n rename,\n replace,\n rollup,\n sample,\n select,\n shuffle,\n sort,\n split,\n spread,\n take,\n unpack,\n)\n\n\ndef _wrap(data: PandasDataFrame) -> DataFrame:\n \"\"\"Unsafe version of redframes.io.wrap()\"\"\"\n df = DataFrame()\n df._data = data\n return df\n\n\nclass _TakeMixin:\n def __init__(self, data: PandasDataFrame | PandasGroupedFrame) -> None:\n self._data = data\n\n def take(self, rows: int, **kwargs) -> DataFrame:\n \"\"\"Take any number of rows (from the top/bottom)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\"foo\": range(10)})\n ```\n | foo |\n |------:|\n | 0 |\n | 1 |\n | 2 |\n | 3 |\n | 4 |\n | 5 |\n | 6 |\n | 7 |\n | 8 |\n | 9 |\n\n From \"head\":\n\n ```python\n df.take(1)\n ```\n | foo |\n |------:|\n | 0 |\n\n From \"tail\":\n\n ```python\n df.take(-2)\n ```\n | foo |\n |------:|\n | 8 |\n | 9 |\n \"\"\"\n return _wrap(take(self._data, rows, **kwargs))\n\n\nclass _InterchangeMixin(_TakeMixin):\n def __init__(self, data: PandasDataFrame) -> None:\n self._data = data\n\n def __array__(self) -> NumpyArray:\n return self._data.__array__()\n\n def __dataframe__(self, nan_as_null=False, allow_copy=True) -> \"PandasDataFrameXchg\": # type: ignore\n return self._data.__dataframe__(nan_as_null, allow_copy)\n\n def __len__(self) -> int:\n return self._data.__len__()\n\n @property\n def iloc(self):\n return self._data.iloc\n\n\nclass _CommonMixin(_TakeMixin):\n def __init__(self, data: PandasDataFrame | PandasGroupedFrame) -> None:\n self._data = data\n\n def accumulate(self, column: Column, into: Column) -> DataFrame:\n \"\"\"Run a cumulative sum over a column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3, 4]})\n ```\n | foo |\n |------:|\n | 1 |\n | 2 |\n | 3 |\n | 4 |\n\n 
```python\n df.accumulate(\"foo\", into=\"cumsum\")\n ```\n | foo | cumsum |\n |------:|---------:|\n | 1 | 1 |\n | 2 | 3 |\n | 3 | 6 |\n | 4 | 10 |\n \"\"\"\n return _wrap(accumulate(self._data, column, into))\n\n def gather(\n self,\n columns: Columns | None = None,\n beside: LazyColumns | None = None,\n into: tuple[Column, Column] = (\"variable\", \"value\"),\n ):\n \"\"\"Gather columns into rows (opposite of spread)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\n \"foo\": [1, 2, 1, 2],\n \"bar\": [\"A\", \"B\", \"C\", \"D\"],\n \"baz\": [\"!\", \"@\", \"#\", \"$\"],\n \"jaz\": range(4)\n })\n ```\n | foo | bar | baz | jaz |\n |------:|:------|:------|------:|\n | 1 | A | ! | 0 |\n | 2 | B | @ | 1 |\n | 1 | C | # | 2 |\n | 2 | D | $ | 3 |\n\n All columns:\n\n ```python\n df.gather()\n ```\n | variable | value |\n |:-----------|:--------|\n | foo | 1 |\n | foo | 2 |\n | foo | 1 |\n | foo | 2 |\n | bar | A |\n | bar | B |\n | bar | C |\n | bar | D |\n | baz | ! |\n | baz | @ |\n | baz | # |\n | baz | $ |\n | jaz | 0 |\n | jaz | 1 |\n | jaz | 2 |\n | jaz | 3 |\n\n Multiple columns:\n\n ```python\n df.gather([\"foo\", \"bar\"], into=(\"var\", \"val\"))\n ```\n | baz | jaz | var | val |\n |:------|------:|:------|:------|\n | ! | 0 | foo | 1 |\n | @ | 1 | foo | 2 |\n | # | 2 | foo | 1 |\n | $ | 3 | foo | 2 |\n | ! | 0 | bar | A |\n | @ | 1 | bar | B |\n | # | 2 | bar | C |\n | $ | 3 | bar | D |\n\n All columns beside:\n\n ```python\n df.group([\"foo\", \"bar\"]).gather(into=(\"variable\", \"value\"))\n ```\n | foo | bar | variable | value |\n |------:|:------|:-----------|:--------|\n | 1 | A | baz | ! |\n | 2 | B | baz | @ |\n | 1 | C | baz | # |\n | 2 | D | baz | $ |\n | 1 | A | jaz | 0 |\n | 2 | B | jaz | 1 |\n | 1 | C | jaz | 2 |\n | 2 | D | jaz | 3 |\n \"\"\"\n return _wrap(gather(self._data, columns, beside, into))\n\n def pack(self, column: Column, sep: str) -> DataFrame:\n \"\"\"Collate and concatenate row values for a target column (opposite of unpack)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\n \"foo\": [\"A\", \"A\", \"B\", \"A\", \"B\", \"C\"],\n \"bar\": [1, 2, 3, 4, 5, 6]\n })\n ```\n | foo | bar |\n |:------|------:|\n | A | 1 |\n | A | 2 |\n | B | 3 |\n | A | 4 |\n | B | 5 |\n | C | 6 |\n\n Pack all rows:\n\n ```python\n df.pack(\"foo\", sep=\"+\")\n ```\n | foo |\n |:------------|\n | A+A+B+A+B+C |\n\n Pack rows by Group:\n\n ```python\n df.group(\"foo\").pack(\"bar\", sep=\"|\")\n ```\n | foo | bar |\n |:------|:------|\n | A | 1|2|4 |\n | B | 3|5 |\n | C | 6 |\n \"\"\"\n return _wrap(pack(self._data, column, sep))\n\n def rank(\n self,\n column: Column,\n into: Column,\n descending: bool = False,\n ) -> DataFrame:\n \"\"\"Rank order values in a column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [2, 3, 3, 99, 1000, 1, -6, 4]})\n ```\n | foo |\n |------:|\n | 2 |\n | 3 |\n | 3 |\n | 99 |\n | 1000 |\n | 1 |\n | -6 |\n | 4 |\n\n ```python\n df.rank(\"foo\", into=\"rank\", descending=True)\n ```\n | foo | rank |\n |------:|-------:|\n | 2 | 5 |\n | 3 | 4 |\n | 3 | 4 |\n | 99 | 2 |\n | 1000 | 1 |\n | 1 | 6 |\n | -6 | 7 |\n | 4 | 3 |\n \"\"\"\n return _wrap(rank(self._data, column, into, descending))\n\n def rollup(self, over: dict[Column, tuple[Column, Func]]) -> DataFrame:\n \"\"\"Apply summary functions and/or statistics to target columns\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3, 4, 5], \"bar\": [99, 100, 1, -5, 2]})\n ```\n | foo | bar |\n |------:|------:|\n | 1 | 99 |\n | 2 | 100 |\n | 3 | 1 |\n | 4 | -5 |\n | 5 | 2 |\n\n 
```python\n df.rollup({\n \"fcount\": (\"foo\", rf.stat.count),\n \"fmean\": (\"foo\", rf.stat.mean),\n \"fsum\": (\"foo\", rf.stat.sum),\n \"fmax\": (\"foo\", rf.stat.max),\n \"bmedian\": (\"bar\", rf.stat.median),\n \"bmin\": (\"bar\", rf.stat.min),\n \"bstd\": (\"bar\", rf.stat.std)\n })\n ```\n | fcount | fmean | fsum | fmax | bmedian | bmin | bstd |\n |---------:|--------:|-------:|-------:|----------:|-------:|-------:|\n | 5 | 3 | 15 | 5 | 2 | -5 | 54.93 |\n \"\"\"\n return _wrap(rollup(self._data, over))\n\n def summarize(self, over: dict[Column, tuple[Column, Func]]) -> DataFrame:\n message = \"Marked for removal, please use `rollup` instead\"\n warnings.warn(message, FutureWarning)\n return self.rollup(over)\n\n\nclass GroupedFrame(_CommonMixin):\n \"\"\"GroupedFrame compatible with: `accumulate`, `gather`, `pack`, `rank`, `rollup`, `take`\"\"\"\n\n def __repr__(self) -> str:\n return self._data.obj.__repr__() # type: ignore\n\n def _repr_html_(self) -> str:\n return self._data.obj.to_html(index=True) # type: ignore\n\n\nclass DataFrame(_CommonMixin, _InterchangeMixin):\n def __init__(self, data: dict[Column, Values] | None = None) -> None:\n \"\"\"Initialize a DataFrame with a standard dictionary\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n ```\n | foo | bar |\n |------:|:------|\n | 1 | A |\n | 2 | B |\n \"\"\"\n _check_type(data, {dict, None})\n if not data:\n self._data = PandasDataFrame()\n if isinstance(data, dict):\n self._data = PandasDataFrame(data)\n\n def __eq__(self, rhs: Any) -> bool:\n \"\"\"Check if two DataFrames are equal to each other\n\n Example:\n\n ```python\n adf = rf.DataFrame({\"foo\": [1]})\n bdf = rf.DataFrame({\"bar\": [1]})\n cdf = rf.DataFrame({\"foo\": [1]})\n print(adf == bdf)\n print(adf == cdf)\n # False\n # True\n ```\n \"\"\"\n if not isinstance(rhs, DataFrame):\n return False\n return self._data.equals(rhs._data)\n\n def __getitem__(self, key: Column) -> Values:\n \"\"\"Retrive values (as a python list) from a specified column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n df[\"foo\"]\n # [1, 2]\n ```\n \"\"\"\n return list(self._data[key])\n\n def __repr__(self) -> str:\n return self._data.__repr__()\n\n def _repr_html_(self) -> str:\n return self._data.to_html(index=True)\n\n def __str__(self) -> str:\n \"\"\"Return string constructor (for copy-and-pasting)\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n str(df)\n # \"rf.DataFrame({'foo': [1, 2], 'bar': ['A', 'B']})\"\n ```\n \"\"\"", "metadata": {"task_id": "maxhumber--redframes/34", "ground_truth": " data = self._data.to_dict(orient=\"list\")\n string = pprint.pformat(data, indent=4, sort_dicts=False, compact=True)\n if \"\\n\" in string:\n string = \" \" + string[1:-1]\n string = f\"rf.DataFrame({{\\n{string}\\n}})\"\n else:\n string = f\"rf.DataFrame({string})\"\n return string\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "core.py"], "context_start_lineno": 0, "lineno": 442, "function_name": "__str__"}, "groundtruth": " data = self._data.to_dict(orient=\"list\")\n string = pprint.pformat(data, indent=4, sort_dicts=False, compact=True)\n if \"\\n\" in string:\n string = \" \" + string[1:-1]\n string = f\"rf.DataFrame({{\\n{string}\\n}})\"\n else:\n string = f\"rf.DataFrame({string})\"\n return string\n"} +{"prompt": " PandasGroupedFrame,\n Value,\n Values,\n)\nfrom .verbs import (\n accumulate,\n append,\n combine,\n cross,\n dedupe,\n 
denix,\n drop,\n fill,\n filter,\n gather,\n group,\n join,\n mutate,\n pack,\n rank,\n rename,\n replace,\n rollup,\n sample,\n select,\n shuffle,\n sort,\n split,\n spread,\n take,\n unpack,\n)\n\n\ndef _wrap(data: PandasDataFrame) -> DataFrame:\n \"\"\"Unsafe version of redframes.io.wrap()\"\"\"\n df = DataFrame()\n df._data = data\n return df\n\n\nclass _TakeMixin:\n def __init__(self, data: PandasDataFrame | PandasGroupedFrame) -> None:\n self._data = data\n\n def take(self, rows: int, **kwargs) -> DataFrame:\n \"\"\"Take any number of rows (from the top/bottom)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\"foo\": range(10)})\n ```\n | foo |\n |------:|\n | 0 |\n | 1 |\n | 2 |\n | 3 |\n | 4 |\n | 5 |\n | 6 |\n | 7 |\n | 8 |\n | 9 |\n\n From \"head\":\n\n ```python\n df.take(1)\n ```\n | foo |\n |------:|\n | 0 |\n\n From \"tail\":\n\n ```python\n df.take(-2)\n ```\n | foo |\n |------:|\n | 8 |\n | 9 |\n \"\"\"\n return _wrap(take(self._data, rows, **kwargs))\n\n\nclass _InterchangeMixin(_TakeMixin):\n def __init__(self, data: PandasDataFrame) -> None:\n self._data = data\n\n def __array__(self) -> NumpyArray:\n return self._data.__array__()\n\n def __dataframe__(self, nan_as_null=False, allow_copy=True) -> \"PandasDataFrameXchg\": # type: ignore\n return self._data.__dataframe__(nan_as_null, allow_copy)\n\n def __len__(self) -> int:\n return self._data.__len__()\n\n @property\n def iloc(self):\n return self._data.iloc\n\n\nclass _CommonMixin(_TakeMixin):\n def __init__(self, data: PandasDataFrame | PandasGroupedFrame) -> None:\n self._data = data\n\n def accumulate(self, column: Column, into: Column) -> DataFrame:\n \"\"\"Run a cumulative sum over a column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3, 4]})\n ```\n | foo |\n |------:|\n | 1 |\n | 2 |\n | 3 |\n | 4 |\n\n ```python\n df.accumulate(\"foo\", into=\"cumsum\")\n ```\n | foo | cumsum |\n |------:|---------:|\n | 1 | 1 |\n | 2 | 3 |\n | 3 | 6 |\n | 4 | 10 |\n \"\"\"\n return _wrap(accumulate(self._data, column, into))\n\n def gather(\n self,\n columns: Columns | None = None,\n beside: LazyColumns | None = None,\n into: tuple[Column, Column] = (\"variable\", \"value\"),\n ):\n \"\"\"Gather columns into rows (opposite of spread)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\n \"foo\": [1, 2, 1, 2],\n \"bar\": [\"A\", \"B\", \"C\", \"D\"],\n \"baz\": [\"!\", \"@\", \"#\", \"$\"],\n \"jaz\": range(4)\n })\n ```\n | foo | bar | baz | jaz |\n |------:|:------|:------|------:|\n | 1 | A | ! | 0 |\n | 2 | B | @ | 1 |\n | 1 | C | # | 2 |\n | 2 | D | $ | 3 |\n\n All columns:\n\n ```python\n df.gather()\n ```\n | variable | value |\n |:-----------|:--------|\n | foo | 1 |\n | foo | 2 |\n | foo | 1 |\n | foo | 2 |\n | bar | A |\n | bar | B |\n | bar | C |\n | bar | D |\n | baz | ! |\n | baz | @ |\n | baz | # |\n | baz | $ |\n | jaz | 0 |\n | jaz | 1 |\n | jaz | 2 |\n | jaz | 3 |\n\n Multiple columns:\n\n ```python\n df.gather([\"foo\", \"bar\"], into=(\"var\", \"val\"))\n ```\n | baz | jaz | var | val |\n |:------|------:|:------|:------|\n | ! | 0 | foo | 1 |\n | @ | 1 | foo | 2 |\n | # | 2 | foo | 1 |\n | $ | 3 | foo | 2 |\n | ! | 0 | bar | A |\n | @ | 1 | bar | B |\n | # | 2 | bar | C |\n | $ | 3 | bar | D |\n\n All columns beside:\n\n ```python\n df.group([\"foo\", \"bar\"]).gather(into=(\"variable\", \"value\"))\n ```\n | foo | bar | variable | value |\n |------:|:------|:-----------|:--------|\n | 1 | A | baz | ! 
|\n | 2 | B | baz | @ |\n | 1 | C | baz | # |\n | 2 | D | baz | $ |\n | 1 | A | jaz | 0 |\n | 2 | B | jaz | 1 |\n | 1 | C | jaz | 2 |\n | 2 | D | jaz | 3 |\n \"\"\"\n return _wrap(gather(self._data, columns, beside, into))\n\n def pack(self, column: Column, sep: str) -> DataFrame:\n \"\"\"Collate and concatenate row values for a target column (opposite of unpack)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\n \"foo\": [\"A\", \"A\", \"B\", \"A\", \"B\", \"C\"],\n \"bar\": [1, 2, 3, 4, 5, 6]\n })\n ```\n | foo | bar |\n |:------|------:|\n | A | 1 |\n | A | 2 |\n | B | 3 |\n | A | 4 |\n | B | 5 |\n | C | 6 |\n\n Pack all rows:\n\n ```python\n df.pack(\"foo\", sep=\"+\")\n ```\n | foo |\n |:------------|\n | A+A+B+A+B+C |\n\n Pack rows by Group:\n\n ```python\n df.group(\"foo\").pack(\"bar\", sep=\"|\")\n ```\n | foo | bar |\n |:------|:------|\n | A | 1|2|4 |\n | B | 3|5 |\n | C | 6 |\n \"\"\"\n return _wrap(pack(self._data, column, sep))\n\n def rank(\n self,\n column: Column,\n into: Column,\n descending: bool = False,\n ) -> DataFrame:\n \"\"\"Rank order values in a column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [2, 3, 3, 99, 1000, 1, -6, 4]})\n ```\n | foo |\n |------:|\n | 2 |\n | 3 |\n | 3 |\n | 99 |\n | 1000 |\n | 1 |\n | -6 |\n | 4 |\n\n ```python\n df.rank(\"foo\", into=\"rank\", descending=True)\n ```\n | foo | rank |\n |------:|-------:|\n | 2 | 5 |\n | 3 | 4 |\n | 3 | 4 |\n | 99 | 2 |\n | 1000 | 1 |\n | 1 | 6 |\n | -6 | 7 |\n | 4 | 3 |\n \"\"\"\n return _wrap(rank(self._data, column, into, descending))\n\n def rollup(self, over: dict[Column, tuple[Column, Func]]) -> DataFrame:\n \"\"\"Apply summary functions and/or statistics to target columns\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3, 4, 5], \"bar\": [99, 100, 1, -5, 2]})\n ```\n | foo | bar |\n |------:|------:|\n | 1 | 99 |\n | 2 | 100 |\n | 3 | 1 |\n | 4 | -5 |\n | 5 | 2 |\n\n ```python\n df.rollup({\n \"fcount\": (\"foo\", rf.stat.count),\n \"fmean\": (\"foo\", rf.stat.mean),\n \"fsum\": (\"foo\", rf.stat.sum),\n \"fmax\": (\"foo\", rf.stat.max),\n \"bmedian\": (\"bar\", rf.stat.median),\n \"bmin\": (\"bar\", rf.stat.min),\n \"bstd\": (\"bar\", rf.stat.std)\n })\n ```\n | fcount | fmean | fsum | fmax | bmedian | bmin | bstd |\n |---------:|--------:|-------:|-------:|----------:|-------:|-------:|\n | 5 | 3 | 15 | 5 | 2 | -5 | 54.93 |\n \"\"\"\n return _wrap(rollup(self._data, over))\n\n def summarize(self, over: dict[Column, tuple[Column, Func]]) -> DataFrame:\n message = \"Marked for removal, please use `rollup` instead\"\n warnings.warn(message, FutureWarning)\n return self.rollup(over)\n\n\nclass GroupedFrame(_CommonMixin):\n \"\"\"GroupedFrame compatible with: `accumulate`, `gather`, `pack`, `rank`, `rollup`, `take`\"\"\"\n\n def __repr__(self) -> str:\n return self._data.obj.__repr__() # type: ignore\n\n def _repr_html_(self) -> str:\n return self._data.obj.to_html(index=True) # type: ignore\n\n\nclass DataFrame(_CommonMixin, _InterchangeMixin):\n def __init__(self, data: dict[Column, Values] | None = None) -> None:\n \"\"\"Initialize a DataFrame with a standard dictionary\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n ```\n | foo | bar |\n |------:|:------|\n | 1 | A |\n | 2 | B |\n \"\"\"\n _check_type(data, {dict, None})\n if not data:\n self._data = PandasDataFrame()\n if isinstance(data, dict):\n self._data = PandasDataFrame(data)\n\n def __eq__(self, rhs: Any) -> bool:\n \"\"\"Check if two DataFrames are equal to each 
other\n\n Example:\n\n ```python\n adf = rf.DataFrame({\"foo\": [1]})\n bdf = rf.DataFrame({\"bar\": [1]})\n cdf = rf.DataFrame({\"foo\": [1]})\n print(adf == bdf)\n print(adf == cdf)\n # False\n # True\n ```\n \"\"\"\n if not isinstance(rhs, DataFrame):\n return False\n return self._data.equals(rhs._data)\n\n def __getitem__(self, key: Column) -> Values:\n \"\"\"Retrive values (as a python list) from a specified column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n df[\"foo\"]\n # [1, 2]\n ```\n \"\"\"\n return list(self._data[key])\n\n def __repr__(self) -> str:\n return self._data.__repr__()\n\n def _repr_html_(self) -> str:\n return self._data.to_html(index=True)\n\n def __str__(self) -> str:\n \"\"\"Return string constructor (for copy-and-pasting)\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n str(df)\n # \"rf.DataFrame({'foo': [1, 2], 'bar': ['A', 'B']})\"\n ```\n \"\"\"\n data = self._data.to_dict(orient=\"list\")\n string = pprint.pformat(data, indent=4, sort_dicts=False, compact=True)\n if \"\\n\" in string:\n string = \" \" + string[1:-1]\n string = f\"rf.DataFrame({{\\n{string}\\n}})\"\n else:\n string = f\"rf.DataFrame({string})\"\n return string\n\n @property\n def columns(self) -> Columns:\n \"\"\"Inspect column keys (names)\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"], \"baz\": [True, False]})\n df.columns\n # ['foo', 'bar', 'baz']\n ```\n \"\"\"\n return list(self._data.columns)\n\n @property\n def dimensions(self) -> dict[str, int]:\n \"\"\"Inspect DataFrame shape\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": range(10), \"bar\": range(10, 20)})\n df.dimensions\n # {'rows': 10, 'columns': 2}\n ```\n \"\"\"\n return dict(zip([\"rows\", \"columns\"], self._data.shape))\n\n @property\n def empty(self) -> bool:\n \"\"\"Inspect if DataFrame is \"empty\"\n\n Example:\n\n ```python\n df = rf.DataFrame()\n df.empty\n # True\n ```\n \"\"\"\n return self._data.empty\n\n @property\n def memory(self) -> str:\n \"\"\"Interrogate DataFrame (deep) memory usage\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3], \"bar\": [\"A\", \"B\", \"C\"]})\n df.memory\n # '326B'\n ```\n \"\"\"", "metadata": {"task_id": "maxhumber--redframes/35", "ground_truth": " size = self._data.memory_usage(deep=True).sum()\n power_labels = {40: \"TB\", 30: \"GB\", 20: \"MB\", 10: \"KB\"}\n for power, label in power_labels.items():\n if size >= (2**power):\n approx_size = size // 2**power\n return f\"{approx_size} {label}\"\n return f\"{size} B\"\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "core.py"], "context_start_lineno": 22, "lineno": 505, "function_name": "memory"}, "groundtruth": " size = self._data.memory_usage(deep=True).sum()\n power_labels = {40: \"TB\", 30: \"GB\", 20: \"MB\", 10: \"KB\"}\n for power, label in power_labels.items():\n if size >= (2**power):\n approx_size = size // 2**power\n return f\"{approx_size} {label}\"\n return f\"{size} B\"\n"} +{"prompt": "asGroupedFrame) -> None:\n self._data = data\n\n def take(self, rows: int, **kwargs) -> DataFrame:\n \"\"\"Take any number of rows (from the top/bottom)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\"foo\": range(10)})\n ```\n | foo |\n |------:|\n | 0 |\n | 1 |\n | 2 |\n | 3 |\n | 4 |\n | 5 |\n | 6 |\n | 7 |\n | 8 |\n | 9 |\n\n From \"head\":\n\n ```python\n df.take(1)\n ```\n | foo |\n |------:|\n | 0 |\n\n From \"tail\":\n\n ```python\n df.take(-2)\n ```\n | foo |\n 
|------:|\n | 8 |\n | 9 |\n \"\"\"\n return _wrap(take(self._data, rows, **kwargs))\n\n\nclass _InterchangeMixin(_TakeMixin):\n def __init__(self, data: PandasDataFrame) -> None:\n self._data = data\n\n def __array__(self) -> NumpyArray:\n return self._data.__array__()\n\n def __dataframe__(self, nan_as_null=False, allow_copy=True) -> \"PandasDataFrameXchg\": # type: ignore\n return self._data.__dataframe__(nan_as_null, allow_copy)\n\n def __len__(self) -> int:\n return self._data.__len__()\n\n @property\n def iloc(self):\n return self._data.iloc\n\n\nclass _CommonMixin(_TakeMixin):\n def __init__(self, data: PandasDataFrame | PandasGroupedFrame) -> None:\n self._data = data\n\n def accumulate(self, column: Column, into: Column) -> DataFrame:\n \"\"\"Run a cumulative sum over a column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3, 4]})\n ```\n | foo |\n |------:|\n | 1 |\n | 2 |\n | 3 |\n | 4 |\n\n ```python\n df.accumulate(\"foo\", into=\"cumsum\")\n ```\n | foo | cumsum |\n |------:|---------:|\n | 1 | 1 |\n | 2 | 3 |\n | 3 | 6 |\n | 4 | 10 |\n \"\"\"\n return _wrap(accumulate(self._data, column, into))\n\n def gather(\n self,\n columns: Columns | None = None,\n beside: LazyColumns | None = None,\n into: tuple[Column, Column] = (\"variable\", \"value\"),\n ):\n \"\"\"Gather columns into rows (opposite of spread)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\n \"foo\": [1, 2, 1, 2],\n \"bar\": [\"A\", \"B\", \"C\", \"D\"],\n \"baz\": [\"!\", \"@\", \"#\", \"$\"],\n \"jaz\": range(4)\n })\n ```\n | foo | bar | baz | jaz |\n |------:|:------|:------|------:|\n | 1 | A | ! | 0 |\n | 2 | B | @ | 1 |\n | 1 | C | # | 2 |\n | 2 | D | $ | 3 |\n\n All columns:\n\n ```python\n df.gather()\n ```\n | variable | value |\n |:-----------|:--------|\n | foo | 1 |\n | foo | 2 |\n | foo | 1 |\n | foo | 2 |\n | bar | A |\n | bar | B |\n | bar | C |\n | bar | D |\n | baz | ! |\n | baz | @ |\n | baz | # |\n | baz | $ |\n | jaz | 0 |\n | jaz | 1 |\n | jaz | 2 |\n | jaz | 3 |\n\n Multiple columns:\n\n ```python\n df.gather([\"foo\", \"bar\"], into=(\"var\", \"val\"))\n ```\n | baz | jaz | var | val |\n |:------|------:|:------|:------|\n | ! | 0 | foo | 1 |\n | @ | 1 | foo | 2 |\n | # | 2 | foo | 1 |\n | $ | 3 | foo | 2 |\n | ! | 0 | bar | A |\n | @ | 1 | bar | B |\n | # | 2 | bar | C |\n | $ | 3 | bar | D |\n\n All columns beside:\n\n ```python\n df.group([\"foo\", \"bar\"]).gather(into=(\"variable\", \"value\"))\n ```\n | foo | bar | variable | value |\n |------:|:------|:-----------|:--------|\n | 1 | A | baz | ! 
|\n | 2 | B | baz | @ |\n | 1 | C | baz | # |\n | 2 | D | baz | $ |\n | 1 | A | jaz | 0 |\n | 2 | B | jaz | 1 |\n | 1 | C | jaz | 2 |\n | 2 | D | jaz | 3 |\n \"\"\"\n return _wrap(gather(self._data, columns, beside, into))\n\n def pack(self, column: Column, sep: str) -> DataFrame:\n \"\"\"Collate and concatenate row values for a target column (opposite of unpack)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\n \"foo\": [\"A\", \"A\", \"B\", \"A\", \"B\", \"C\"],\n \"bar\": [1, 2, 3, 4, 5, 6]\n })\n ```\n | foo | bar |\n |:------|------:|\n | A | 1 |\n | A | 2 |\n | B | 3 |\n | A | 4 |\n | B | 5 |\n | C | 6 |\n\n Pack all rows:\n\n ```python\n df.pack(\"foo\", sep=\"+\")\n ```\n | foo |\n |:------------|\n | A+A+B+A+B+C |\n\n Pack rows by Group:\n\n ```python\n df.group(\"foo\").pack(\"bar\", sep=\"|\")\n ```\n | foo | bar |\n |:------|:------|\n | A | 1|2|4 |\n | B | 3|5 |\n | C | 6 |\n \"\"\"\n return _wrap(pack(self._data, column, sep))\n\n def rank(\n self,\n column: Column,\n into: Column,\n descending: bool = False,\n ) -> DataFrame:\n \"\"\"Rank order values in a column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [2, 3, 3, 99, 1000, 1, -6, 4]})\n ```\n | foo |\n |------:|\n | 2 |\n | 3 |\n | 3 |\n | 99 |\n | 1000 |\n | 1 |\n | -6 |\n | 4 |\n\n ```python\n df.rank(\"foo\", into=\"rank\", descending=True)\n ```\n | foo | rank |\n |------:|-------:|\n | 2 | 5 |\n | 3 | 4 |\n | 3 | 4 |\n | 99 | 2 |\n | 1000 | 1 |\n | 1 | 6 |\n | -6 | 7 |\n | 4 | 3 |\n \"\"\"\n return _wrap(rank(self._data, column, into, descending))\n\n def rollup(self, over: dict[Column, tuple[Column, Func]]) -> DataFrame:\n \"\"\"Apply summary functions and/or statistics to target columns\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3, 4, 5], \"bar\": [99, 100, 1, -5, 2]})\n ```\n | foo | bar |\n |------:|------:|\n | 1 | 99 |\n | 2 | 100 |\n | 3 | 1 |\n | 4 | -5 |\n | 5 | 2 |\n\n ```python\n df.rollup({\n \"fcount\": (\"foo\", rf.stat.count),\n \"fmean\": (\"foo\", rf.stat.mean),\n \"fsum\": (\"foo\", rf.stat.sum),\n \"fmax\": (\"foo\", rf.stat.max),\n \"bmedian\": (\"bar\", rf.stat.median),\n \"bmin\": (\"bar\", rf.stat.min),\n \"bstd\": (\"bar\", rf.stat.std)\n })\n ```\n | fcount | fmean | fsum | fmax | bmedian | bmin | bstd |\n |---------:|--------:|-------:|-------:|----------:|-------:|-------:|\n | 5 | 3 | 15 | 5 | 2 | -5 | 54.93 |\n \"\"\"\n return _wrap(rollup(self._data, over))\n\n def summarize(self, over: dict[Column, tuple[Column, Func]]) -> DataFrame:\n message = \"Marked for removal, please use `rollup` instead\"\n warnings.warn(message, FutureWarning)\n return self.rollup(over)\n\n\nclass GroupedFrame(_CommonMixin):\n \"\"\"GroupedFrame compatible with: `accumulate`, `gather`, `pack`, `rank`, `rollup`, `take`\"\"\"\n\n def __repr__(self) -> str:\n return self._data.obj.__repr__() # type: ignore\n\n def _repr_html_(self) -> str:\n return self._data.obj.to_html(index=True) # type: ignore\n\n\nclass DataFrame(_CommonMixin, _InterchangeMixin):\n def __init__(self, data: dict[Column, Values] | None = None) -> None:\n \"\"\"Initialize a DataFrame with a standard dictionary\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n ```\n | foo | bar |\n |------:|:------|\n | 1 | A |\n | 2 | B |\n \"\"\"\n _check_type(data, {dict, None})\n if not data:\n self._data = PandasDataFrame()\n if isinstance(data, dict):\n self._data = PandasDataFrame(data)\n\n def __eq__(self, rhs: Any) -> bool:\n \"\"\"Check if two DataFrames are equal to each 
other\n\n Example:\n\n ```python\n adf = rf.DataFrame({\"foo\": [1]})\n bdf = rf.DataFrame({\"bar\": [1]})\n cdf = rf.DataFrame({\"foo\": [1]})\n print(adf == bdf)\n print(adf == cdf)\n # False\n # True\n ```\n \"\"\"\n if not isinstance(rhs, DataFrame):\n return False\n return self._data.equals(rhs._data)\n\n def __getitem__(self, key: Column) -> Values:\n \"\"\"Retrive values (as a python list) from a specified column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n df[\"foo\"]\n # [1, 2]\n ```\n \"\"\"\n return list(self._data[key])\n\n def __repr__(self) -> str:\n return self._data.__repr__()\n\n def _repr_html_(self) -> str:\n return self._data.to_html(index=True)\n\n def __str__(self) -> str:\n \"\"\"Return string constructor (for copy-and-pasting)\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n str(df)\n # \"rf.DataFrame({'foo': [1, 2], 'bar': ['A', 'B']})\"\n ```\n \"\"\"\n data = self._data.to_dict(orient=\"list\")\n string = pprint.pformat(data, indent=4, sort_dicts=False, compact=True)\n if \"\\n\" in string:\n string = \" \" + string[1:-1]\n string = f\"rf.DataFrame({{\\n{string}\\n}})\"\n else:\n string = f\"rf.DataFrame({string})\"\n return string\n\n @property\n def columns(self) -> Columns:\n \"\"\"Inspect column keys (names)\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"], \"baz\": [True, False]})\n df.columns\n # ['foo', 'bar', 'baz']\n ```\n \"\"\"\n return list(self._data.columns)\n\n @property\n def dimensions(self) -> dict[str, int]:\n \"\"\"Inspect DataFrame shape\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": range(10), \"bar\": range(10, 20)})\n df.dimensions\n # {'rows': 10, 'columns': 2}\n ```\n \"\"\"\n return dict(zip([\"rows\", \"columns\"], self._data.shape))\n\n @property\n def empty(self) -> bool:\n \"\"\"Inspect if DataFrame is \"empty\"\n\n Example:\n\n ```python\n df = rf.DataFrame()\n df.empty\n # True\n ```\n \"\"\"\n return self._data.empty\n\n @property\n def memory(self) -> str:\n \"\"\"Interrogate DataFrame (deep) memory usage\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3], \"bar\": [\"A\", \"B\", \"C\"]})\n df.memory\n # '326B'\n ```\n \"\"\"\n size = self._data.memory_usage(deep=True).sum()\n power_labels = {40: \"TB\", 30: \"GB\", 20: \"MB\", 10: \"KB\"}\n for power, label in power_labels.items():\n if size >= (2**power):\n approx_size = size // 2**power\n return f\"{approx_size} {label}\"\n return f\"{size} B\"\n\n @property\n def types(self) -> dict[Column, type]:\n \"\"\"Inspect column types\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"], \"baz\": [True, False]})\n df.types\n # {'foo': int, 'bar': object, 'baz': bool}\n ```\n \"\"\"", "metadata": {"task_id": "maxhumber--redframes/36", "ground_truth": " numpy_types = {\n NumpyType(\"O\"): object,\n NumpyType(\"int64\"): int,\n NumpyType(\"float64\"): float,\n NumpyType(\"bool\"): bool,\n NumpyType(\"datetime64\"): DateTime,\n }\n raw_types = dict(self._data.dtypes)\n clean_types = {}\n for column in self.columns:\n current = raw_types[column]\n clean = numpy_types.get(current, current) # type: ignore\n clean_types[column] = clean\n return clean_types\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "core.py"], "context_start_lineno": 64, "lineno": 525, "function_name": "types"}, "groundtruth": " numpy_types = {\n NumpyType(\"O\"): object,\n NumpyType(\"int64\"): int,\n 
NumpyType(\"float64\"): float,\n NumpyType(\"bool\"): bool,\n NumpyType(\"datetime64\"): DateTime,\n }\n raw_types = dict(self._data.dtypes)\n clean_types = {}\n for column in self.columns:\n current = raw_types[column]\n clean = numpy_types.get(current, current) # type: ignore\n clean_types[column] = clean\n return clean_types\n"} +{"prompt": " foo | 1 |\n | $ | 3 | foo | 2 |\n | ! | 0 | bar | A |\n | @ | 1 | bar | B |\n | # | 2 | bar | C |\n | $ | 3 | bar | D |\n\n All columns beside:\n\n ```python\n df.group([\"foo\", \"bar\"]).gather(into=(\"variable\", \"value\"))\n ```\n | foo | bar | variable | value |\n |------:|:------|:-----------|:--------|\n | 1 | A | baz | ! |\n | 2 | B | baz | @ |\n | 1 | C | baz | # |\n | 2 | D | baz | $ |\n | 1 | A | jaz | 0 |\n | 2 | B | jaz | 1 |\n | 1 | C | jaz | 2 |\n | 2 | D | jaz | 3 |\n \"\"\"\n return _wrap(gather(self._data, columns, beside, into))\n\n def pack(self, column: Column, sep: str) -> DataFrame:\n \"\"\"Collate and concatenate row values for a target column (opposite of unpack)\n\n Examples:\n\n ```python\n df = rf.DataFrame({\n \"foo\": [\"A\", \"A\", \"B\", \"A\", \"B\", \"C\"],\n \"bar\": [1, 2, 3, 4, 5, 6]\n })\n ```\n | foo | bar |\n |:------|------:|\n | A | 1 |\n | A | 2 |\n | B | 3 |\n | A | 4 |\n | B | 5 |\n | C | 6 |\n\n Pack all rows:\n\n ```python\n df.pack(\"foo\", sep=\"+\")\n ```\n | foo |\n |:------------|\n | A+A+B+A+B+C |\n\n Pack rows by Group:\n\n ```python\n df.group(\"foo\").pack(\"bar\", sep=\"|\")\n ```\n | foo | bar |\n |:------|:------|\n | A | 1|2|4 |\n | B | 3|5 |\n | C | 6 |\n \"\"\"\n return _wrap(pack(self._data, column, sep))\n\n def rank(\n self,\n column: Column,\n into: Column,\n descending: bool = False,\n ) -> DataFrame:\n \"\"\"Rank order values in a column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [2, 3, 3, 99, 1000, 1, -6, 4]})\n ```\n | foo |\n |------:|\n | 2 |\n | 3 |\n | 3 |\n | 99 |\n | 1000 |\n | 1 |\n | -6 |\n | 4 |\n\n ```python\n df.rank(\"foo\", into=\"rank\", descending=True)\n ```\n | foo | rank |\n |------:|-------:|\n | 2 | 5 |\n | 3 | 4 |\n | 3 | 4 |\n | 99 | 2 |\n | 1000 | 1 |\n | 1 | 6 |\n | -6 | 7 |\n | 4 | 3 |\n \"\"\"\n return _wrap(rank(self._data, column, into, descending))\n\n def rollup(self, over: dict[Column, tuple[Column, Func]]) -> DataFrame:\n \"\"\"Apply summary functions and/or statistics to target columns\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3, 4, 5], \"bar\": [99, 100, 1, -5, 2]})\n ```\n | foo | bar |\n |------:|------:|\n | 1 | 99 |\n | 2 | 100 |\n | 3 | 1 |\n | 4 | -5 |\n | 5 | 2 |\n\n ```python\n df.rollup({\n \"fcount\": (\"foo\", rf.stat.count),\n \"fmean\": (\"foo\", rf.stat.mean),\n \"fsum\": (\"foo\", rf.stat.sum),\n \"fmax\": (\"foo\", rf.stat.max),\n \"bmedian\": (\"bar\", rf.stat.median),\n \"bmin\": (\"bar\", rf.stat.min),\n \"bstd\": (\"bar\", rf.stat.std)\n })\n ```\n | fcount | fmean | fsum | fmax | bmedian | bmin | bstd |\n |---------:|--------:|-------:|-------:|----------:|-------:|-------:|\n | 5 | 3 | 15 | 5 | 2 | -5 | 54.93 |\n \"\"\"\n return _wrap(rollup(self._data, over))\n\n def summarize(self, over: dict[Column, tuple[Column, Func]]) -> DataFrame:\n message = \"Marked for removal, please use `rollup` instead\"\n warnings.warn(message, FutureWarning)\n return self.rollup(over)\n\n\nclass GroupedFrame(_CommonMixin):\n \"\"\"GroupedFrame compatible with: `accumulate`, `gather`, `pack`, `rank`, `rollup`, `take`\"\"\"\n\n def __repr__(self) -> str:\n return self._data.obj.__repr__() # type: ignore\n\n 
def _repr_html_(self) -> str:\n return self._data.obj.to_html(index=True) # type: ignore\n\n\nclass DataFrame(_CommonMixin, _InterchangeMixin):\n def __init__(self, data: dict[Column, Values] | None = None) -> None:\n \"\"\"Initialize a DataFrame with a standard dictionary\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n ```\n | foo | bar |\n |------:|:------|\n | 1 | A |\n | 2 | B |\n \"\"\"\n _check_type(data, {dict, None})\n if not data:\n self._data = PandasDataFrame()\n if isinstance(data, dict):\n self._data = PandasDataFrame(data)\n\n def __eq__(self, rhs: Any) -> bool:\n \"\"\"Check if two DataFrames are equal to each other\n\n Example:\n\n ```python\n adf = rf.DataFrame({\"foo\": [1]})\n bdf = rf.DataFrame({\"bar\": [1]})\n cdf = rf.DataFrame({\"foo\": [1]})\n print(adf == bdf)\n print(adf == cdf)\n # False\n # True\n ```\n \"\"\"\n if not isinstance(rhs, DataFrame):\n return False\n return self._data.equals(rhs._data)\n\n def __getitem__(self, key: Column) -> Values:\n \"\"\"Retrive values (as a python list) from a specified column\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n df[\"foo\"]\n # [1, 2]\n ```\n \"\"\"\n return list(self._data[key])\n\n def __repr__(self) -> str:\n return self._data.__repr__()\n\n def _repr_html_(self) -> str:\n return self._data.to_html(index=True)\n\n def __str__(self) -> str:\n \"\"\"Return string constructor (for copy-and-pasting)\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n str(df)\n # \"rf.DataFrame({'foo': [1, 2], 'bar': ['A', 'B']})\"\n ```\n \"\"\"\n data = self._data.to_dict(orient=\"list\")\n string = pprint.pformat(data, indent=4, sort_dicts=False, compact=True)\n if \"\\n\" in string:\n string = \" \" + string[1:-1]\n string = f\"rf.DataFrame({{\\n{string}\\n}})\"\n else:\n string = f\"rf.DataFrame({string})\"\n return string\n\n @property\n def columns(self) -> Columns:\n \"\"\"Inspect column keys (names)\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"], \"baz\": [True, False]})\n df.columns\n # ['foo', 'bar', 'baz']\n ```\n \"\"\"\n return list(self._data.columns)\n\n @property\n def dimensions(self) -> dict[str, int]:\n \"\"\"Inspect DataFrame shape\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": range(10), \"bar\": range(10, 20)})\n df.dimensions\n # {'rows': 10, 'columns': 2}\n ```\n \"\"\"\n return dict(zip([\"rows\", \"columns\"], self._data.shape))\n\n @property\n def empty(self) -> bool:\n \"\"\"Inspect if DataFrame is \"empty\"\n\n Example:\n\n ```python\n df = rf.DataFrame()\n df.empty\n # True\n ```\n \"\"\"\n return self._data.empty\n\n @property\n def memory(self) -> str:\n \"\"\"Interrogate DataFrame (deep) memory usage\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2, 3], \"bar\": [\"A\", \"B\", \"C\"]})\n df.memory\n # '326B'\n ```\n \"\"\"\n size = self._data.memory_usage(deep=True).sum()\n power_labels = {40: \"TB\", 30: \"GB\", 20: \"MB\", 10: \"KB\"}\n for power, label in power_labels.items():\n if size >= (2**power):\n approx_size = size // 2**power\n return f\"{approx_size} {label}\"\n return f\"{size} B\"\n\n @property\n def types(self) -> dict[Column, type]:\n \"\"\"Inspect column types\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"], \"baz\": [True, False]})\n df.types\n # {'foo': int, 'bar': object, 'baz': bool}\n ```\n \"\"\"\n numpy_types = {\n NumpyType(\"O\"): object,\n 
NumpyType(\"int64\"): int,\n NumpyType(\"float64\"): float,\n NumpyType(\"bool\"): bool,\n NumpyType(\"datetime64\"): DateTime,\n }\n raw_types = dict(self._data.dtypes)\n clean_types = {}\n for column in self.columns:\n current = raw_types[column]\n clean = numpy_types.get(current, current) # type: ignore\n clean_types[column] = clean\n return clean_types\n\n def append(self, other: DataFrame) -> DataFrame:\n \"\"\"Append rows from another DataFrame\n\n Example:\n\n ```python\n df1 = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n ```\n | foo | bar |\n |------:|:------|\n | 1 | A |\n | 2 | B |\n\n ```python\n df2 = rf.DataFrame({\"bar\": [\"C\", \"D\"], \"foo\": [3, 4], \"baz\": [\"$\", \"@\"]})\n ```\n | bar | foo | baz |\n |:------|------:|:------|\n | C | 3 | $ |\n | D | 4 | @ |\n\n ```python\n df1.append(df2)\n ```\n | foo | bar | baz |\n |------:|:------|:------|\n | 1 | A | nan |\n | 2 | B | nan |\n | 3 | C | $ |\n | 4 | D | @ |\n \"\"\"\n _check_type(other, DataFrame)\n return _wrap(append(self._data, other._data))\n\n def combine(\n self, columns: Columns, into: Column, sep: str, drop: bool = True\n ) -> DataFrame:\n \"\"\"Combine multiple columns into a single column (opposite of `split`)\n\n Example:\n\n ```python\n df = rf.DataFrame({\"foo\": [1, 2], \"bar\": [\"A\", \"B\"]})\n ```\n | foo | bar |\n |------:|:------|\n | 1 | A |\n | 2 | B |\n\n ```python\n df.combine([\"bar\", \"foo\"], into=\"baz\", sep=\"::\", drop=True)\n ```\n | baz |\n |:------|\n | A::1 |\n | B::2 |\n \"\"\"\n return _wrap(combine(self._data, columns, into, sep, drop))\n\n def cross(\n self, rhs: DataFrame | None = None, postfix: tuple[str, str] = (\"_lhs\", \"_rhs\")\n ) -> DataFrame:\n \"\"\"Cross join columns from another DataFrame\n\n Examples:\n\n ```python\n df = rf.DataFrame({\"foo\": [\"a\", \"b\", \"c\"], \"bar\": [1, 2, 3]})\n ```\n | foo | bar |\n |:------|------:|\n | a | 1 |\n | b | 2 |\n | c | 3 |\n\n Self:\n\n ```python\n df.cross()\n ```\n\n | foo_lhs | bar_lhs | foo_rhs | bar_rhs |\n |:----------|----------:|:----------|----------:|\n | a | 1 | a | 1 |\n | a | 1 | b | 2 |\n | a | 1 | c | 3 |\n | b | 2 | a | 1 |\n | b | 2 | b | 2 |\n | b | 2 | c | 3 |\n | c | 3 | a | 1 |\n | c | 3 | b | 2 |\n | c | 3 | c | 3 |\n\n Two DataFrames:\n\n ```python\n dfa = rf.DataFrame({\"foo\": [1, 2, 3]})\n dfb = rf.DataFrame({\"bar\": [1, 2, 3]})\n dfa.cross(dfb, postfix=(\"_a\", \"_b\"))\n ```\n\n | foo | bar |\n |------:|------:|\n | 1 | 1 |\n | 1 | 2 |\n | 1 | 3 |\n | 2 | 1 |\n | 2 | 2 |\n | 2 | 3 |\n | 3 | 1 |\n | 3 | 2 |\n | 3 | 3 |\n \"\"\"", "metadata": {"task_id": "maxhumber--redframes/37", "ground_truth": " rhs = self if (rhs == None) else rhs\n _check_type(rhs, DataFrame)\n return _wrap(cross(self._data, rhs._data, postfix)) # type: ignore\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "core.py"], "context_start_lineno": 217, "lineno": 653, "function_name": "cross"}, "groundtruth": " rhs = self if (rhs == None) else rhs\n _check_type(rhs, DataFrame)\n return _wrap(cross(self._data, rhs._data, postfix)) # type: ignore\n"} +{"prompt": "from __future__ import annotations\n\nfrom .types import (\n Any,\n Columns,\n LazyColumns,\n PandasDataFrame,\n PandasIndex,\n PandasRangeIndex,\n)\n\n\ndef _check_type(argument: Any, against: type | set[type | None]) -> None:", "metadata": {"task_id": "maxhumber--redframes/38", "ground_truth": " if isinstance(against, set):\n if len(against) == 0:\n against = {against} # type: ignore\n if not isinstance(against, set):\n against = {against}\n 
optional = None in against\n just_types = against.difference({None})\n checks = [isinstance(argument, t) for t in just_types] # type: ignore\n if optional:\n checks += [argument == None]\n if not any(checks):\n str_types = \" | \".join([t.__name__ for t in just_types]) # type: ignore\n if optional:\n str_types += \" | None\"\n raise TypeError(f\"must be {str_types}\")\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "checks.py"], "context_start_lineno": 0, "lineno": 13, "function_name": "_check_type"}, "groundtruth": " if isinstance(against, set):\n if len(against) == 0:\n against = {against} # type: ignore\n if not isinstance(against, set):\n against = {against}\n optional = None in against\n just_types = against.difference({None})\n checks = [isinstance(argument, t) for t in just_types] # type: ignore\n if optional:\n checks += [argument == None]\n if not any(checks):\n str_types = \" | \".join([t.__name__ for t in just_types]) # type: ignore\n if optional:\n str_types += \" | None\"\n raise TypeError(f\"must be {str_types}\")\n"} +{"prompt": "from __future__ import annotations\n\nfrom .types import (\n Any,\n Columns,\n LazyColumns,\n PandasDataFrame,\n PandasIndex,\n PandasRangeIndex,\n)\n\n\ndef _check_type(argument: Any, against: type | set[type | None]) -> None:\n if isinstance(against, set):\n if len(against) == 0:\n against = {against} # type: ignore\n if not isinstance(against, set):\n against = {against}\n optional = None in against\n just_types = against.difference({None})\n checks = [isinstance(argument, t) for t in just_types] # type: ignore\n if optional:\n checks += [argument == None]\n if not any(checks):\n str_types = \" | \".join([t.__name__ for t in just_types]) # type: ignore\n if optional:\n str_types += \" | None\"\n raise TypeError(f\"must be {str_types}\")\n\n\ndef _check_values(values: Any, type: type) -> None:\n if not all(isinstance(value, type) for value in values):\n raise TypeError(f\"must be {type.__name__}\")\n\n\ndef _check_keys(columns: LazyColumns | None, against: Columns | PandasIndex) -> None:", "metadata": {"task_id": "maxhumber--redframes/39", "ground_truth": " if isinstance(columns, str):\n columns = [columns]\n columns = [] if (columns == None) else columns\n bad_keys = set(columns).difference(against) # type: ignore\n if bad_keys:\n if len(bad_keys) == 1:\n raise KeyError(f\"invalid key {bad_keys}\")\n else:\n raise KeyError(f\"invalid keys {bad_keys}\")\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "checks.py"], "context_start_lineno": 0, "lineno": 36, "function_name": "_check_keys"}, "groundtruth": " if isinstance(columns, str):\n columns = [columns]\n columns = [] if (columns == None) else columns\n bad_keys = set(columns).difference(against) # type: ignore\n if bad_keys:\n if len(bad_keys) == 1:\n raise KeyError(f\"invalid key {bad_keys}\")\n else:\n raise KeyError(f\"invalid keys {bad_keys}\")\n"} +{"prompt": "from __future__ import annotations\n\nfrom .types import (\n Any,\n Columns,\n LazyColumns,\n PandasDataFrame,\n PandasIndex,\n PandasRangeIndex,\n)\n\n\ndef _check_type(argument: Any, against: type | set[type | None]) -> None:\n if isinstance(against, set):\n if len(against) == 0:\n against = {against} # type: ignore\n if not isinstance(against, set):\n against = {against}\n optional = None in against\n just_types = against.difference({None})\n checks = [isinstance(argument, t) for t in just_types] # type: ignore\n if optional:\n checks += [argument == None]\n if not any(checks):\n str_types = \" | \".join([t.__name__ 
for t in just_types]) # type: ignore\n if optional:\n str_types += \" | None\"\n raise TypeError(f\"must be {str_types}\")\n\n\ndef _check_values(values: Any, type: type) -> None:\n if not all(isinstance(value, type) for value in values):\n raise TypeError(f\"must be {type.__name__}\")\n\n\ndef _check_keys(columns: LazyColumns | None, against: Columns | PandasIndex) -> None:\n if isinstance(columns, str):\n columns = [columns]\n columns = [] if (columns == None) else columns\n bad_keys = set(columns).difference(against) # type: ignore\n if bad_keys:\n if len(bad_keys) == 1:\n raise KeyError(f\"invalid key {bad_keys}\")\n else:\n raise KeyError(f\"invalid keys {bad_keys}\")\n\n\ndef _check_index(df: PandasDataFrame) -> None:", "metadata": {"task_id": "maxhumber--redframes/40", "ground_truth": " if not (df.index.name == None):\n raise IndexError(\"must be unnamed\")\n if not isinstance(df.index, PandasRangeIndex):\n raise IndexError(\"must be range\")\n if not (df.index.start == 0):\n raise IndexError(\"must start at 0\")\n if not (df.index.step == 1):\n raise IndexError(\"must step by 1\")\n", "fpath_tuple": ["maxhumber_redframes", "redframes", "checks.py"], "context_start_lineno": 0, "lineno": 48, "function_name": "_check_index"}, "groundtruth": " if not (df.index.name == None):\n raise IndexError(\"must be unnamed\")\n if not isinstance(df.index, PandasRangeIndex):\n raise IndexError(\"must be range\")\n if not (df.index.start == 0):\n raise IndexError(\"must start at 0\")\n if not (df.index.step == 1):\n raise IndexError(\"must step by 1\")\n"} +{"prompt": "from __future__ import annotations\n\nfrom .types import (\n Any,\n Columns,\n LazyColumns,\n PandasDataFrame,\n PandasIndex,\n PandasRangeIndex,\n)\n\n\ndef _check_type(argument: Any, against: type | set[type | None]) -> None:\n if isinstance(against, set):\n if len(against) == 0:\n against = {against} # type: ignore\n if not isinstance(against, set):\n against = {against}\n optional = None in against\n just_types = against.difference({None})\n checks = [isinstance(argument, t) for t in just_types] # type: ignore\n if optional:\n checks += [argument == None]\n if not any(checks):\n str_types = \" | \".join([t.__name__ for t in just_types]) # type: ignore\n if optional:\n str_types += \" | None\"\n raise TypeError(f\"must be {str_types}\")\n\n\ndef _check_values(values: Any, type: type) -> None:\n if not all(isinstance(value, type) for value in values):\n raise TypeError(f\"must be {type.__name__}\")\n\n\ndef _check_keys(columns: LazyColumns | None, against: Columns | PandasIndex) -> None:\n if isinstance(columns, str):\n columns = [columns]\n columns = [] if (columns == None) else columns\n bad_keys = set(columns).difference(against) # type: ignore\n if bad_keys:\n if len(bad_keys) == 1:\n raise KeyError(f\"invalid key {bad_keys}\")\n else:\n raise KeyError(f\"invalid keys {bad_keys}\")\n\n\ndef _check_index(df: PandasDataFrame) -> None:\n if not (df.index.name == None):\n raise IndexError(\"must be unnamed\")\n if not isinstance(df.index, PandasRangeIndex):\n raise IndexError(\"must be range\")\n if not (df.index.start == 0):\n raise IndexError(\"must start at 0\")\n if not (df.index.step == 1):\n raise IndexError(\"must step by 1\")\n\n\ndef _check_columns(df: PandasDataFrame) -> None:", "metadata": {"task_id": "maxhumber--redframes/41", "ground_truth": " if type(df.columns) != PandasIndex:\n raise KeyError(\"must be flat\")\n if df.columns.has_duplicates:\n raise KeyError(\"must not contain duplicate keys\")\n", "fpath_tuple": 
["maxhumber_redframes", "redframes", "checks.py"], "context_start_lineno": 0, "lineno": 59, "function_name": "_check_columns"}, "groundtruth": " if type(df.columns) != PandasIndex:\n raise KeyError(\"must be flat\")\n if df.columns.has_duplicates:\n raise KeyError(\"must not contain duplicate keys\")\n"}