pretty_name: LoWRA-Bench
dataset_info:
- config_name: mistral-7b-v0.1-dpo
features:
- name: task_name
dtype: string
- name: layer_model
dtype: string
- name: layer_name
dtype: string
- name: pre_ft_name
dtype: string
- name: pre_ft_weight
sequence:
sequence: float32
- name: lora_0_name
dtype: string
- name: lora_0_A_weight
sequence:
sequence: float32
- name: lora_0_B_weight
sequence:
sequence: float32
- name: lora_0_rank
dtype: int64
- name: lora_0_alpha
dtype: int64
- name: lora_1_name
dtype: string
- name: lora_1_A_weight
sequence:
sequence: float32
- name: lora_1_B_weight
sequence:
sequence: float32
- name: lora_1_rank
dtype: int64
- name: lora_1_alpha
dtype: int64
- name: lora_2_name
dtype: string
- name: lora_2_A_weight
sequence:
sequence: float32
- name: lora_2_B_weight
sequence:
sequence: float32
- name: lora_2_rank
dtype: int64
- name: lora_2_alpha
dtype: int64
- name: lora_3_name
dtype: string
- name: lora_3_A_weight
sequence:
sequence: float32
- name: lora_3_B_weight
sequence:
sequence: float32
- name: lora_3_rank
dtype: int64
- name: lora_3_alpha
dtype: int64
- name: lora_4_name
dtype: string
- name: lora_4_A_weight
sequence:
sequence: float32
- name: lora_4_B_weight
sequence:
sequence: float32
- name: lora_4_rank
dtype: int64
- name: lora_4_alpha
dtype: int64
- name: lora_5_name
dtype: string
- name: lora_5_A_weight
sequence:
sequence: float32
- name: lora_5_B_weight
sequence:
sequence: float32
- name: lora_5_rank
dtype: int64
- name: lora_5_alpha
dtype: int64
- name: lora_6_name
dtype: string
- name: lora_6_A_weight
sequence:
sequence: float32
- name: lora_6_B_weight
sequence:
sequence: float32
- name: lora_6_rank
dtype: int64
- name: lora_6_alpha
dtype: int64
- name: lora_7_name
dtype: string
- name: lora_7_A_weight
sequence:
sequence: float32
- name: lora_7_B_weight
sequence:
sequence: float32
- name: lora_7_rank
dtype: int64
- name: lora_7_alpha
dtype: int64
- name: lora_8_name
dtype: string
- name: lora_8_A_weight
sequence:
sequence: float32
- name: lora_8_B_weight
sequence:
sequence: float32
- name: lora_8_rank
dtype: int64
- name: lora_8_alpha
dtype: int64
- name: lora_9_name
dtype: string
- name: lora_9_A_weight
sequence:
sequence: float32
- name: lora_9_B_weight
sequence:
sequence: float32
- name: lora_9_rank
dtype: int64
- name: lora_9_alpha
dtype: int64
- name: lora_10_name
dtype: string
- name: lora_10_A_weight
sequence:
sequence: float32
- name: lora_10_B_weight
sequence:
sequence: float32
- name: lora_10_rank
dtype: int64
- name: lora_10_alpha
dtype: int64
- name: lora_11_name
dtype: string
- name: lora_11_A_weight
sequence:
sequence: float32
- name: lora_11_B_weight
sequence:
sequence: float32
- name: lora_11_rank
dtype: int64
- name: lora_11_alpha
dtype: int64
- name: lora_12_name
dtype: string
- name: lora_12_A_weight
sequence:
sequence: float32
- name: lora_12_B_weight
sequence:
sequence: float32
- name: lora_12_rank
dtype: int64
- name: lora_12_alpha
dtype: int64
- name: lora_13_name
dtype: string
- name: lora_13_A_weight
sequence:
sequence: float32
- name: lora_13_B_weight
sequence:
sequence: float32
- name: lora_13_rank
dtype: int64
- name: lora_13_alpha
dtype: int64
- name: lora_14_name
dtype: string
- name: lora_14_A_weight
sequence:
sequence: float32
- name: lora_14_B_weight
sequence:
sequence: float32
- name: lora_14_rank
dtype: int64
- name: lora_14_alpha
dtype: int64
splits:
- name: train
num_bytes: 8661875544
num_examples: 128
download_size: 3419054382
dataset_size: 8661875544
- config_name: mistral-7b-v0.1-sft
features:
- name: task_name
dtype: string
- name: layer_model
dtype: string
- name: layer_name
dtype: string
- name: pre_ft_name
dtype: string
- name: pre_ft_weight
sequence:
sequence: float32
- name: lora_0_name
dtype: string
- name: lora_0_A_weight
sequence:
sequence: float32
- name: lora_0_B_weight
sequence:
sequence: float32
- name: lora_0_rank
dtype: int64
- name: lora_0_alpha
dtype: int64
- name: lora_1_name
dtype: string
- name: lora_1_A_weight
sequence:
sequence: float32
- name: lora_1_B_weight
sequence:
sequence: float32
- name: lora_1_rank
dtype: int64
- name: lora_1_alpha
dtype: int64
- name: lora_2_name
dtype: string
- name: lora_2_A_weight
sequence:
sequence: float32
- name: lora_2_B_weight
sequence:
sequence: float32
- name: lora_2_rank
dtype: int64
- name: lora_2_alpha
dtype: int64
- name: lora_3_name
dtype: string
- name: lora_3_A_weight
sequence:
sequence: float32
- name: lora_3_B_weight
sequence:
sequence: float32
- name: lora_3_rank
dtype: int64
- name: lora_3_alpha
dtype: int64
- name: lora_4_name
dtype: string
- name: lora_4_A_weight
sequence:
sequence: float32
- name: lora_4_B_weight
sequence:
sequence: float32
- name: lora_4_rank
dtype: int64
- name: lora_4_alpha
dtype: int64
- name: lora_5_name
dtype: string
- name: lora_5_A_weight
sequence:
sequence: float32
- name: lora_5_B_weight
sequence:
sequence: float32
- name: lora_5_rank
dtype: int64
- name: lora_5_alpha
dtype: int64
- name: lora_6_name
dtype: string
- name: lora_6_A_weight
sequence:
sequence: float32
- name: lora_6_B_weight
sequence:
sequence: float32
- name: lora_6_rank
dtype: int64
- name: lora_6_alpha
dtype: int64
- name: lora_7_name
dtype: string
- name: lora_7_A_weight
sequence:
sequence: float32
- name: lora_7_B_weight
sequence:
sequence: float32
- name: lora_7_rank
dtype: int64
- name: lora_7_alpha
dtype: int64
- name: lora_8_name
dtype: string
- name: lora_8_A_weight
sequence:
sequence: float32
- name: lora_8_B_weight
sequence:
sequence: float32
- name: lora_8_rank
dtype: int64
- name: lora_8_alpha
dtype: int64
- name: lora_9_name
dtype: string
- name: lora_9_A_weight
sequence:
sequence: float32
- name: lora_9_B_weight
sequence:
sequence: float32
- name: lora_9_rank
dtype: int64
- name: lora_9_alpha
dtype: int64
- name: lora_10_name
dtype: string
- name: lora_10_A_weight
sequence:
sequence: float32
- name: lora_10_B_weight
sequence:
sequence: float32
- name: lora_10_rank
dtype: int64
- name: lora_10_alpha
dtype: int64
- name: lora_11_name
dtype: string
- name: lora_11_A_weight
sequence:
sequence: float32
- name: lora_11_B_weight
sequence:
sequence: float32
- name: lora_11_rank
dtype: int64
- name: lora_11_alpha
dtype: int64
- name: lora_12_name
dtype: string
- name: lora_12_A_weight
sequence:
sequence: float32
- name: lora_12_B_weight
sequence:
sequence: float32
- name: lora_12_rank
dtype: int64
- name: lora_12_alpha
dtype: int64
- name: lora_13_name
dtype: string
- name: lora_13_A_weight
sequence:
sequence: float32
- name: lora_13_B_weight
sequence:
sequence: float32
- name: lora_13_rank
dtype: int64
- name: lora_13_alpha
dtype: int64
- name: lora_14_name
dtype: string
- name: lora_14_A_weight
sequence:
sequence: float32
- name: lora_14_B_weight
sequence:
sequence: float32
- name: lora_14_rank
dtype: int64
- name: lora_14_alpha
dtype: int64
splits:
- name: train
num_bytes: 8661875544
num_examples: 128
download_size: 5791365905
dataset_size: 8661875544
- config_name: stable-diffusion-1.5
features:
- name: task_name
dtype: string
- name: layer_model
dtype: string
- name: layer_name
dtype: string
- name: pre_ft_name
dtype: string
- name: pre_ft_weight
sequence:
sequence: float32
- name: lora_0_name
dtype: string
- name: lora_0_A_weight
sequence:
sequence: float32
- name: lora_0_B_weight
sequence:
sequence: float32
- name: lora_0_rank
dtype: int64
- name: lora_0_alpha
dtype: float64
- name: lora_1_name
dtype: string
- name: lora_1_A_weight
sequence:
sequence: float32
- name: lora_1_B_weight
sequence:
sequence: float32
- name: lora_1_rank
dtype: int64
- name: lora_1_alpha
dtype: float64
- name: lora_2_name
dtype: string
- name: lora_2_A_weight
sequence:
sequence: float32
- name: lora_2_B_weight
sequence:
sequence: float32
- name: lora_2_rank
dtype: int64
- name: lora_2_alpha
dtype: float64
- name: lora_3_name
dtype: string
- name: lora_3_A_weight
sequence:
sequence: float32
- name: lora_3_B_weight
sequence:
sequence: float32
- name: lora_3_rank
dtype: int64
- name: lora_3_alpha
dtype: float64
- name: lora_4_name
dtype: string
- name: lora_4_A_weight
sequence:
sequence: float32
- name: lora_4_B_weight
sequence:
sequence: float32
- name: lora_4_rank
dtype: int64
- name: lora_4_alpha
dtype: float64
- name: lora_5_name
dtype: string
- name: lora_5_A_weight
sequence:
sequence: float32
- name: lora_5_B_weight
sequence:
sequence: float32
- name: lora_5_rank
dtype: int64
- name: lora_5_alpha
dtype: float64
- name: lora_6_name
dtype: string
- name: lora_6_A_weight
sequence:
sequence: float32
- name: lora_6_B_weight
sequence:
sequence: float32
- name: lora_6_rank
dtype: int64
- name: lora_6_alpha
dtype: float64
- name: lora_7_name
dtype: string
- name: lora_7_A_weight
sequence:
sequence: float32
- name: lora_7_B_weight
sequence:
sequence: float32
- name: lora_7_rank
dtype: int64
- name: lora_7_alpha
dtype: float64
- name: lora_8_name
dtype: string
- name: lora_8_A_weight
sequence:
sequence: float32
- name: lora_8_B_weight
sequence:
sequence: float32
- name: lora_8_rank
dtype: int64
- name: lora_8_alpha
dtype: float64
- name: lora_9_name
dtype: string
- name: lora_9_A_weight
sequence:
sequence: float32
- name: lora_9_B_weight
sequence:
sequence: float32
- name: lora_9_rank
dtype: int64
- name: lora_9_alpha
dtype: float64
- name: lora_10_name
dtype: string
- name: lora_10_A_weight
sequence:
sequence: float32
- name: lora_10_B_weight
sequence:
sequence: float32
- name: lora_10_rank
dtype: int64
- name: lora_10_alpha
dtype: float64
- name: lora_11_name
dtype: string
- name: lora_11_A_weight
sequence:
sequence: float32
- name: lora_11_B_weight
sequence:
sequence: float32
- name: lora_11_rank
dtype: int64
- name: lora_11_alpha
dtype: float64
- name: lora_12_name
dtype: string
- name: lora_12_A_weight
sequence:
sequence: float32
- name: lora_12_B_weight
sequence:
sequence: float32
- name: lora_12_rank
dtype: int64
- name: lora_12_alpha
dtype: float64
- name: lora_13_name
dtype: string
- name: lora_13_A_weight
sequence:
sequence: float32
- name: lora_13_B_weight
sequence:
sequence: float32
- name: lora_13_rank
dtype: int64
- name: lora_13_alpha
dtype: float64
- name: lora_14_name
dtype: string
- name: lora_14_A_weight
sequence:
sequence: float32
- name: lora_14_B_weight
sequence:
sequence: float32
- name: lora_14_rank
dtype: int64
- name: lora_14_alpha
dtype: float64
splits:
- name: train
num_bytes: 2561357508
num_examples: 264
download_size: 1724766354
dataset_size: 2561357508
- config_name: vit
features:
- name: task_name
dtype: string
- name: layer_model
dtype: string
- name: layer_name
dtype: string
- name: pre_ft_name
dtype: string
- name: pre_ft_weight
sequence:
sequence: float32
- name: lora_0_name
dtype: string
- name: lora_0_A_weight
sequence:
sequence: float32
- name: lora_0_B_weight
sequence:
sequence: float32
- name: lora_0_rank
dtype: int64
- name: lora_0_alpha
dtype: int64
- name: lora_1_name
dtype: string
- name: lora_1_A_weight
sequence:
sequence: float32
- name: lora_1_B_weight
sequence:
sequence: float32
- name: lora_1_rank
dtype: int64
- name: lora_1_alpha
dtype: int64
- name: lora_2_name
dtype: string
- name: lora_2_A_weight
sequence:
sequence: float32
- name: lora_2_B_weight
sequence:
sequence: float32
- name: lora_2_rank
dtype: int64
- name: lora_2_alpha
dtype: int64
- name: lora_3_name
dtype: string
- name: lora_3_A_weight
sequence:
sequence: float32
- name: lora_3_B_weight
sequence:
sequence: float32
- name: lora_3_rank
dtype: int64
- name: lora_3_alpha
dtype: int64
- name: lora_4_name
dtype: string
- name: lora_4_A_weight
sequence:
sequence: float32
- name: lora_4_B_weight
sequence:
sequence: float32
- name: lora_4_rank
dtype: int64
- name: lora_4_alpha
dtype: int64
- name: lora_5_name
dtype: string
- name: lora_5_A_weight
sequence:
sequence: float32
- name: lora_5_B_weight
sequence:
sequence: float32
- name: lora_5_rank
dtype: int64
- name: lora_5_alpha
dtype: int64
- name: lora_6_name
dtype: string
- name: lora_6_A_weight
sequence:
sequence: float32
- name: lora_6_B_weight
sequence:
sequence: float32
- name: lora_6_rank
dtype: int64
- name: lora_6_alpha
dtype: int64
- name: lora_7_name
dtype: string
- name: lora_7_A_weight
sequence:
sequence: float32
- name: lora_7_B_weight
sequence:
sequence: float32
- name: lora_7_rank
dtype: int64
- name: lora_7_alpha
dtype: int64
- name: lora_8_name
dtype: string
- name: lora_8_A_weight
sequence:
sequence: float32
- name: lora_8_B_weight
sequence:
sequence: float32
- name: lora_8_rank
dtype: int64
- name: lora_8_alpha
dtype: int64
- name: lora_9_name
dtype: string
- name: lora_9_A_weight
sequence:
sequence: float32
- name: lora_9_B_weight
sequence:
sequence: float32
- name: lora_9_rank
dtype: int64
- name: lora_9_alpha
dtype: int64
- name: lora_10_name
dtype: string
- name: lora_10_A_weight
sequence:
sequence: float32
- name: lora_10_B_weight
sequence:
sequence: float32
- name: lora_10_rank
dtype: int64
- name: lora_10_alpha
dtype: int64
- name: lora_11_name
dtype: string
- name: lora_11_A_weight
sequence:
sequence: float32
- name: lora_11_B_weight
sequence:
sequence: float32
- name: lora_11_rank
dtype: int64
- name: lora_11_alpha
dtype: int64
- name: lora_12_name
dtype: string
- name: lora_12_A_weight
sequence:
sequence: float32
- name: lora_12_B_weight
sequence:
sequence: float32
- name: lora_12_rank
dtype: int64
- name: lora_12_alpha
dtype: int64
- name: lora_13_name
dtype: string
- name: lora_13_A_weight
sequence:
sequence: float32
- name: lora_13_B_weight
sequence:
sequence: float32
- name: lora_13_rank
dtype: int64
- name: lora_13_alpha
dtype: int64
- name: lora_14_name
dtype: string
- name: lora_14_A_weight
sequence:
sequence: float32
- name: lora_14_B_weight
sequence:
sequence: float32
- name: lora_14_rank
dtype: int64
- name: lora_14_alpha
dtype: int64
splits:
- name: train
num_bytes: 93231628
num_examples: 24
download_size: 111481540
dataset_size: 93231628
configs:
- config_name: mistral-7b-v0.1-dpo
data_files:
- split: train
path: mistral-7b-v0.1-dpo/train-*
- config_name: mistral-7b-v0.1-sft
data_files:
- split: train
path: mistral-7b-v0.1-sft/train-*
- config_name: stable-diffusion-1.5
data_files:
- split: train
path: stable-diffusion-1.5/train-*
- config_name: vit
data_files:
- split: train
path: vit/train-*
# Dataset Card for the LoWRA Bench Dataset

The LoRA Weight Recovery Attack (LoWRA) Bench is a comprehensive benchmark designed to evaluate Pre-Fine-Tuning (Pre-FT) weight recovery methods, as presented in the "Recovering the Pre-Fine-Tuning Weights of Generative Models" paper.

- 🌐 **Homepage:** https://vision.huji.ac.il/spectral_detuning/
- 🧑‍💻 **Repository:** https://github.com/eliahuhorwitz/Spectral-DeTuning
- 📃 **Paper:** https://arxiv.org/abs/2402.10208
- ✉️ **Point of Contact:** [email protected]
## Task Details

**Pre-Fine-Tuning Weight Recovery Attack Setting:** We uncover a vulnerability in LoRA fine-tuned models wherein an attacker is able to undo the fine-tuning process and recover the weights of the original pre-trained model. The setting for the vulnerability is as follows:

- (a) The attacker only has access to n different LoRA fine-tuned models.
- (b) The attacker assumes that all n models originated from the same source model.
- (c) Using only the n visible models, the attacker attempts to recover the original source model.

**Note:** The attacker has no access to the low-rank decomposition of the fine-tuned models.
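
Concretely, for a single layer this setting can be written as follows (a minimal formalization in our own notation, not taken verbatim from the paper):

$$
W_i = W + \frac{\alpha_i}{r_i} B_i A_i, \qquad i = 1, \dots, n,
$$

where $W$ is the Pre-FT weight matrix, $B_i A_i$ is the rank-$r_i$ LoRA update of the $i$-th fine-tuned model, and the attacker observes only the merged matrices $W_1, \dots, W_n$ while attempting to recover $W$.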
## Dataset Description

The LoWRA Bench dataset is designed to evaluate the performance of Pre-FT weight recovery methods. It encompasses three representative pre-trained source models:
- A Vision Transformer (ViT) pre-trained on ImageNet-1K.
- Mistral-7B-v0.1.
- Stable Diffusion 1.5.
These models collectively cover supervised and self-supervised objectives, spanning both vision and natural language processing (NLP) domains, as well as generative and discriminative tasks. Notably, these models are widely used and deployed in numerous production systems.
For each source model, we curate 15 LoRA models fine-tuned on diverse datasets, tasks, and objectives. The dataset comprises a diverse array of layer types, including self-attention, cross-attention, and MLPs, which enables us to assess the generalization capabilities of Pre-FT methods. The evaluation can be conducted per model, per layer type, or per layer depth, allowing for a comprehensive analysis. Overall, our dataset includes 544 source model layers; when taking into account the fine-tuned LoRA layers, it includes over 8,000 layers.
## Dataset Structure

The dataset contains 4 subsets; for each subset we curate 15 LoRA fine-tuned models. Each row of the dataset represents a single layer that should be recovered and contains all the information needed for recovery and numerical evaluation. In particular, for each layer, the dataset includes the original Pre-FT weights and the unmerged fine-tuned LoRA weight matrices. We decided to provide the unmerged weights instead of the merged ones for two reasons:
- Providing the unmerged weights significantly reduces the storage size of the dataset (e.g., for a single Mistral subset this reduces the size from ~100GB to ~8GB).
- Providing the unmerged weights allows the dataset user to study the properties of the fine-tuned LoRA layers and may help when developing new methods.
We leave the merging of the layers to the user; keep in mind that this should be done carefully and tested to ensure the original Pre-FT weights are not simply provided to the method verbatim. See the Layer Merging Example below for an example taken from our GitHub repository.
### Data Subsets

The table below describes the dataset subsets in detail:
| Subset Name          | Pre-FT Model         | Task                     | Fine-tuning Task  | # Pre-FT Layers | # Fine-tuned Layers |
|----------------------|----------------------|--------------------------|-------------------|-----------------|---------------------|
| vit                  | ViT                  | Image Classification     | VTAB-1K           | 24              | 360                 |
| stable-diffusion-1.5 | Stable Diffusion 1.5 | Text-to-Image Generation | Personalization   | 264             | 3960                |
| mistral-7b-v0.1-sft  | Mistral-7B-v0.1      | Text Generation          | UltraChat SFT     | 128             | 1920                |
| mistral-7b-v0.1-dpo  | Mistral-7B-v0.1      | Text Generation          | UltraFeedback DPO | 128             | 1920                |
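
Each subset is exposed as a separate config, so a single subset can be loaded on its own. A minimal sketch (the repository id below is a placeholder for this dataset's Hub path):

```python
from datasets import load_dataset

# "<org>/LoWRA-Bench" is a placeholder; replace it with this dataset's actual Hub path.
vit_subset = load_dataset("<org>/LoWRA-Bench", name="vit", split="train")
print(vit_subset.num_rows)          # 24 rows, one per Pre-FT layer of the ViT subset
print(vit_subset.column_names[:5])  # task_name, layer_model, layer_name, ...
```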
### Data Fields

As described above, each row of the dataset represents a single layer that should be recovered and contains the following fields:
- `task_name` - The name of the task the model was fine-tuned on (the subset name).
- `layer_model` - In some cases the fine-tuning involves more than one model (e.g., Stable Diffusion fine-tunes both the UNet and the Text Encoder). This field specifies the model the layer belongs to.
- `layer_name` - The name of the layer in the Pre-FT model as it appears in the model `state_dict`.
- `pre_ft_name` - The name of the Pre-FT model (e.g., runwayml/stable-diffusion-v1-5).
- `pre_ft_weight` - The weight matrix of the Pre-FT model's layer.
- `lora_{lora_idx}_name` - The name of the LoRA fine-tuned model.
- `lora_{lora_idx}_A_weight` - The LoRA A weight matrix of the LoRA fine-tuned model's layer.
- `lora_{lora_idx}_B_weight` - The LoRA B weight matrix of the LoRA fine-tuned model's layer.
- `lora_{lora_idx}_rank` - The LoRA rank of the LoRA fine-tuned model's layer.
- `lora_{lora_idx}_alpha` - The LoRA alpha of the LoRA fine-tuned model's layer.

where `{lora_idx}` is the index of the LoRA fine-tuned model in the subset (there are 15 LoRA models per subset).
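
As a quick illustration, the per-layer matrices can be pulled out of a single row and their shapes inspected (a short sketch assuming the `vit` subset has been loaded as `vit_subset`, as in the snippet above):

```python
import numpy as np

row = vit_subset[0]
W = np.asarray(row["pre_ft_weight"], dtype=np.float32)    # Pre-FT weight matrix of this layer
A = np.asarray(row["lora_0_A_weight"], dtype=np.float32)  # LoRA A matrix of the first fine-tuned model
B = np.asarray(row["lora_0_B_weight"], dtype=np.float32)  # LoRA B matrix of the first fine-tuned model
print(row["layer_name"], W.shape, B.shape, A.shape, row["lora_0_rank"], row["lora_0_alpha"])
```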
### Layer Merging Example

The following code snippet demonstrates merging the LoRA fine-tuned weights with the Pre-FT weights:
```python
import torch
from copy import deepcopy
from datasets import load_dataset


def merge_lora_weights(args, layer_idx, device):
    dataset = load_dataset(args.dataset, name=args.subset, cache_dir=args.cache_dir)
    layer = deepcopy(dataset.with_format("torch")["train"][layer_idx])
    merged_layer = {}

    # Note: load the ground truth Pre-FT weights
    merged_layer['layer_model'] = layer['layer_model']
    merged_layer['layer_name'] = layer['layer_name']
    merged_layer['pre_ft_name'] = layer['pre_ft_name']
    W_pre_ft = deepcopy(layer['pre_ft_weight']).to(device).float()
    merged_layer['pre_ft_weight'] = deepcopy(W_pre_ft)

    # Note: merge the LoRA weights for all existing LoRA models
    for lora_idx in args.lora_ids:
        alpha = layer[f'lora_{lora_idx}_alpha']
        rank = layer[f'lora_{lora_idx}_rank']
        B = deepcopy(layer[f'lora_{lora_idx}_B_weight']).to(device).float()
        A = deepcopy(layer[f'lora_{lora_idx}_A_weight']).to(device).float()
        merged_layer[f'lora_{lora_idx}_name'] = layer[f'lora_{lora_idx}_name']
        merged_layer[f'lora_{lora_idx}_rank'] = rank
        merged_layer[f'lora_{lora_idx}_alpha'] = alpha
        # Note: W_merged = W_pre_ft + (alpha / rank) * B @ A
        merged_layer[f'lora_{lora_idx}_merged_weights'] = W_pre_ft + ((alpha / rank * B) @ A)

        # Note: sanity checks - the copied Pre-FT weights are intact and the merged weights differ from them
        assert torch.allclose(merged_layer['pre_ft_weight'], layer['pre_ft_weight'])
        assert not torch.allclose(merged_layer[f'lora_{lora_idx}_merged_weights'], layer['pre_ft_weight'])
        assert not torch.allclose(merged_layer[f'lora_{lora_idx}_merged_weights'], merged_layer['pre_ft_weight'])
    return merged_layer
```
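
A hedged usage sketch (the `args` namespace and the repository id below are illustrative; in the GitHub repository these values come from command-line arguments):

```python
from types import SimpleNamespace

args = SimpleNamespace(
    dataset="<org>/LoWRA-Bench",  # placeholder; replace with this dataset's actual Hub path
    subset="vit",
    cache_dir=None,
    lora_ids=range(15),           # all 15 LoRA fine-tuned models in the subset
)
merged = merge_lora_weights(args, layer_idx=0, device="cpu")
print(merged["layer_name"], merged["lora_0_merged_weights"].shape)
```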
## Dataset Creation

### Source Data
- The fine-tuning of the ViT models was performed using the PEFT library on various datasets from the VTAB-1K benchmark.
- The fine-tuned LoRA models for Stable Diffusion are taken from civitai and were fine-tuned by RalFinger.
- The fine-tuning of Mistral was performed based on the Zephyr model as seen here.
For the full list of models and hyper-parameters see the appendix of the paper.
## Risks and Out-of-Scope Use
Our work uncovers a significant vulnerability in fine-tuned models, allowing attackers to access pre-fine-tuning weights. While this discovery reveals potential security risks, our primary objective is to advance the field of Machine Learning and raise awareness within the research community about the existing vulnerabilities in current models.
Instead of using the findings of this study to execute attacks, we advocate for their use by model creators to enhance the safety and security of their models. By acknowledging and addressing vulnerabilities, creators can proactively safeguard against potential threats.
Following established practices in the cyber-security community, we emphasize the importance of open discussion and encourage the reporting of vulnerabilities. By fostering transparency and collaboration, we can collectively create a safer environment for deploying machine learning models.
## Considerations for Using the Data

### Licensing Information
[More Information Needed]
### Citation Information

If you use this dataset in your work, please cite the following paper:

**BibTeX:**
```bibtex
@article{horwitz2024recovering,
  title={Recovering the Pre-Fine-Tuning Weights of Generative Models},
  author={Horwitz, Eliahu and Kahana, Jonathan and Hoshen, Yedid},
  journal={arXiv preprint arXiv:2402.10208},
  year={2024}
}
```